xref: /freebsd/sys/dev/ixgbe/ixgbe_common.c (revision 87b759f0fa1f7554d50ce640c40138512bbded44)
1 /******************************************************************************
2   SPDX-License-Identifier: BSD-3-Clause
3 
4   Copyright (c) 2001-2020, Intel Corporation
5   All rights reserved.
6 
7   Redistribution and use in source and binary forms, with or without
8   modification, are permitted provided that the following conditions are met:
9 
10    1. Redistributions of source code must retain the above copyright notice,
11       this list of conditions and the following disclaimer.
12 
13    2. Redistributions in binary form must reproduce the above copyright
14       notice, this list of conditions and the following disclaimer in the
15       documentation and/or other materials provided with the distribution.
16 
17    3. Neither the name of the Intel Corporation nor the names of its
18       contributors may be used to endorse or promote products derived from
19       this software without specific prior written permission.
20 
21   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31   POSSIBILITY OF SUCH DAMAGE.
32 
33 ******************************************************************************/
34 
35 #include "ixgbe_common.h"
36 #include "ixgbe_phy.h"
37 #include "ixgbe_dcb.h"
38 #include "ixgbe_dcb_82599.h"
39 #include "ixgbe_api.h"
40 
41 static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
42 static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
43 static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
44 static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
45 static void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
46 static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
47 					u16 count);
48 static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
49 static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
50 static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
51 static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
52 
53 static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
54 static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
55 					 u16 *san_mac_offset);
56 static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
57 					     u16 words, u16 *data);
58 static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
59 					      u16 words, u16 *data);
60 static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
61 						 u16 offset);
62 
63 /**
64  * ixgbe_init_ops_generic - Inits function ptrs
65  * @hw: pointer to the hardware structure
66  *
67  * Initialize the function pointers.
68  **/
69 s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
70 {
71 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
72 	struct ixgbe_mac_info *mac = &hw->mac;
73 	u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
74 
75 	DEBUGFUNC("ixgbe_init_ops_generic");
76 
77 	/* EEPROM */
78 	eeprom->ops.init_params = ixgbe_init_eeprom_params_generic;
79 	/* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */
80 	if (eec & IXGBE_EEC_PRES) {
81 		eeprom->ops.read = ixgbe_read_eerd_generic;
82 		eeprom->ops.read_buffer = ixgbe_read_eerd_buffer_generic;
83 	} else {
84 		eeprom->ops.read = ixgbe_read_eeprom_bit_bang_generic;
85 		eeprom->ops.read_buffer =
86 				 ixgbe_read_eeprom_buffer_bit_bang_generic;
87 	}
88 	eeprom->ops.write = ixgbe_write_eeprom_generic;
89 	eeprom->ops.write_buffer = ixgbe_write_eeprom_buffer_bit_bang_generic;
90 	eeprom->ops.validate_checksum =
91 				      ixgbe_validate_eeprom_checksum_generic;
92 	eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_generic;
93 	eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_generic;
94 
95 	/* MAC */
96 	mac->ops.init_hw = ixgbe_init_hw_generic;
97 	mac->ops.reset_hw = NULL;
98 	mac->ops.start_hw = ixgbe_start_hw_generic;
99 	mac->ops.clear_hw_cntrs = ixgbe_clear_hw_cntrs_generic;
100 	mac->ops.get_media_type = NULL;
101 	mac->ops.get_supported_physical_layer = NULL;
102 	mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_generic;
103 	mac->ops.get_mac_addr = ixgbe_get_mac_addr_generic;
104 	mac->ops.stop_adapter = ixgbe_stop_adapter_generic;
105 	mac->ops.get_bus_info = ixgbe_get_bus_info_generic;
106 	mac->ops.set_lan_id = ixgbe_set_lan_id_multi_port_pcie;
107 	mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync;
108 	mac->ops.release_swfw_sync = ixgbe_release_swfw_sync;
109 	mac->ops.prot_autoc_read = prot_autoc_read_generic;
110 	mac->ops.prot_autoc_write = prot_autoc_write_generic;
111 
112 	/* LEDs */
113 	mac->ops.led_on = ixgbe_led_on_generic;
114 	mac->ops.led_off = ixgbe_led_off_generic;
115 	mac->ops.blink_led_start = ixgbe_blink_led_start_generic;
116 	mac->ops.blink_led_stop = ixgbe_blink_led_stop_generic;
117 	mac->ops.init_led_link_act = ixgbe_init_led_link_act_generic;
118 
119 	/* RAR, Multicast, VLAN */
120 	mac->ops.set_rar = ixgbe_set_rar_generic;
121 	mac->ops.clear_rar = ixgbe_clear_rar_generic;
122 	mac->ops.insert_mac_addr = NULL;
123 	mac->ops.set_vmdq = NULL;
124 	mac->ops.clear_vmdq = NULL;
125 	mac->ops.init_rx_addrs = ixgbe_init_rx_addrs_generic;
126 	mac->ops.update_uc_addr_list = ixgbe_update_uc_addr_list_generic;
127 	mac->ops.update_mc_addr_list = ixgbe_update_mc_addr_list_generic;
128 	mac->ops.enable_mc = ixgbe_enable_mc_generic;
129 	mac->ops.disable_mc = ixgbe_disable_mc_generic;
130 	mac->ops.clear_vfta = NULL;
131 	mac->ops.set_vfta = NULL;
132 	mac->ops.set_vlvf = NULL;
133 	mac->ops.init_uta_tables = NULL;
134 	mac->ops.enable_rx = ixgbe_enable_rx_generic;
135 	mac->ops.disable_rx = ixgbe_disable_rx_generic;
136 	mac->ops.toggle_txdctl = ixgbe_toggle_txdctl_generic;
137 
138 	/* Flow Control */
139 	mac->ops.fc_enable = ixgbe_fc_enable_generic;
140 	mac->ops.setup_fc = ixgbe_setup_fc_generic;
141 	mac->ops.fc_autoneg = ixgbe_fc_autoneg;
142 
143 	/* Link */
144 	mac->ops.get_link_capabilities = NULL;
145 	mac->ops.setup_link = NULL;
146 	mac->ops.check_link = NULL;
147 	mac->ops.dmac_config = NULL;
148 	mac->ops.dmac_update_tcs = NULL;
149 	mac->ops.dmac_config_tcs = NULL;
150 
151 	return IXGBE_SUCCESS;
152 }
153 
154 /**
155  * ixgbe_device_supports_autoneg_fc - Check if device supports autonegotiation
156  * of flow control
157  * @hw: pointer to hardware structure
158  *
159  * This function returns true if the device supports flow control
160  * autonegotiation, and false if it does not.
161  *
162  **/
163 bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
164 {
165 	bool supported = false;
166 	ixgbe_link_speed speed;
167 	bool link_up;
168 
169 	DEBUGFUNC("ixgbe_device_supports_autoneg_fc");
170 
171 	switch (hw->phy.media_type) {
172 	case ixgbe_media_type_fiber_fixed:
173 	case ixgbe_media_type_fiber_qsfp:
174 	case ixgbe_media_type_fiber:
175 		/* flow control autoneg block list */
176 		switch (hw->device_id) {
177 		case IXGBE_DEV_ID_X550EM_A_SFP:
178 		case IXGBE_DEV_ID_X550EM_A_SFP_N:
179 		case IXGBE_DEV_ID_X550EM_A_QSFP:
180 		case IXGBE_DEV_ID_X550EM_A_QSFP_N:
181 			supported = false;
182 			break;
183 		default:
184 			hw->mac.ops.check_link(hw, &speed, &link_up, false);
185 			/* if link is down, assume supported */
186 			if (link_up)
187 				supported = speed == IXGBE_LINK_SPEED_1GB_FULL ?
188 				true : false;
189 			else
190 				supported = true;
191 		}
192 
193 		break;
194 	case ixgbe_media_type_backplane:
195 		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_XFI)
196 			supported = false;
197 		else
198 			supported = true;
199 		break;
200 	case ixgbe_media_type_copper:
201 		/* only some copper devices support flow control autoneg */
202 		switch (hw->device_id) {
203 		case IXGBE_DEV_ID_82599_T3_LOM:
204 		case IXGBE_DEV_ID_X540T:
205 		case IXGBE_DEV_ID_X540T1:
206 		case IXGBE_DEV_ID_X540_BYPASS:
207 		case IXGBE_DEV_ID_X550T:
208 		case IXGBE_DEV_ID_X550T1:
209 		case IXGBE_DEV_ID_X550EM_X_10G_T:
210 		case IXGBE_DEV_ID_X550EM_A_10G_T:
211 		case IXGBE_DEV_ID_X550EM_A_1G_T:
212 		case IXGBE_DEV_ID_X550EM_A_1G_T_L:
213 			supported = true;
214 			break;
215 		default:
216 			supported = false;
217 		}
218 	default:
219 		break;
220 	}
221 
222 	if (!supported)
223 		ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
224 			      "Device %x does not support flow control autoneg",
225 			      hw->device_id);
226 
227 	return supported;
228 }
229 
/**
 * ixgbe_setup_fc_generic - Set up flow control
 * @hw: pointer to hardware structure
 *
 * Called at init time to set up flow control.  Programs the 1G (PCS1GANA)
 * and, per media type, the backplane (AUTOC) or copper PHY advertisement
 * registers to match hw->fc.requested_mode, then restarts autonegotiation
 * where applicable.  Returns IXGBE_SUCCESS, IXGBE_ERR_INVALID_LINK_SETTINGS
 * for rx_pause in strict-IEEE mode, IXGBE_ERR_CONFIG for an unknown mode,
 * or the error from the protected AUTOC read/write.
 **/
s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_SUCCESS;
	u32 reg = 0, reg_bp = 0;	/* PCS1GANA / AUTOC shadow values */
	u16 reg_cu = 0;			/* copper PHY autoneg advertisement */
	bool locked = false;		/* whether AUTOC access needs RMW lock */

	DEBUGFUNC("ixgbe_setup_fc_generic");

	/* Validate the requested mode */
	if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
		ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
			   "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
		goto out;
	}

	/*
	 * 10gig parts do not have a word in the EEPROM to determine the
	 * default flow control setting, so we explicitly set it to full.
	 */
	if (hw->fc.requested_mode == ixgbe_fc_default)
		hw->fc.requested_mode = ixgbe_fc_full;

	/*
	 * Set up the 1G and 10G flow control advertisement registers so the
	 * HW will be able to do fc autoneg once the cable is plugged in.  If
	 * we link at 10G, the 1G advertisement is harmless and vice versa.
	 */
	switch (hw->phy.media_type) {
	case ixgbe_media_type_backplane:
		/* some MAC's need RMW protection on AUTOC */
		ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &reg_bp);
		if (ret_val != IXGBE_SUCCESS)
			goto out;

		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
		break;
	case ixgbe_media_type_fiber_fixed:
	case ixgbe_media_type_fiber_qsfp:
	case ixgbe_media_type_fiber:
		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);

		break;
	case ixgbe_media_type_copper:
		hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
				     IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg_cu);
		break;
	default:
		break;
	}

	/*
	 * The possible values of fc.requested_mode are:
	 * 0: Flow control is completely disabled
	 * 1: Rx flow control is enabled (we can receive pause frames,
	 *    but not send pause frames).
	 * 2: Tx flow control is enabled (we can send pause frames but
	 *    we do not support receiving pause frames).
	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
	 * other: Invalid.
	 */
	switch (hw->fc.requested_mode) {
	case ixgbe_fc_none:
		/* Flow control completely disabled by software override. */
		reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
		if (hw->phy.media_type == ixgbe_media_type_backplane)
			reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
				    IXGBE_AUTOC_ASM_PAUSE);
		else if (hw->phy.media_type == ixgbe_media_type_copper)
			reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
		break;
	case ixgbe_fc_tx_pause:
		/*
		 * Tx Flow control is enabled, and Rx Flow control is
		 * disabled by software override.
		 */
		reg |= IXGBE_PCS1GANA_ASM_PAUSE;
		reg &= ~IXGBE_PCS1GANA_SYM_PAUSE;
		if (hw->phy.media_type == ixgbe_media_type_backplane) {
			reg_bp |= IXGBE_AUTOC_ASM_PAUSE;
			reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE;
		} else if (hw->phy.media_type == ixgbe_media_type_copper) {
			reg_cu |= IXGBE_TAF_ASM_PAUSE;
			reg_cu &= ~IXGBE_TAF_SYM_PAUSE;
		}
		break;
	case ixgbe_fc_rx_pause:
		/*
		 * Rx Flow control is enabled and Tx Flow control is
		 * disabled by software override. Since there really
		 * isn't a way to advertise that we are capable of RX
		 * Pause ONLY, we will advertise that we support both
		 * symmetric and asymmetric Rx PAUSE, as such we fall
		 * through to the fc_full statement.  Later, we will
		 * disable the adapter's ability to send PAUSE frames.
		 */
	case ixgbe_fc_full:
		/* Flow control (both Rx and Tx) is enabled by SW override. */
		reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE;
		if (hw->phy.media_type == ixgbe_media_type_backplane)
			reg_bp |= IXGBE_AUTOC_SYM_PAUSE |
				  IXGBE_AUTOC_ASM_PAUSE;
		else if (hw->phy.media_type == ixgbe_media_type_copper)
			reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
		break;
	default:
		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
			     "Flow control param set incorrectly\n");
		ret_val = IXGBE_ERR_CONFIG;
		goto out;
		break;	/* unreachable after goto; kept for switch symmetry */
	}

	/* Pre-X540 MACs negotiate clause 37 FC through the 1G PCS block */
	if (hw->mac.type < ixgbe_mac_X540) {
		/*
		 * Enable auto-negotiation between the MAC & PHY;
		 * the MAC will advertise clause 37 flow control.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);

		/* Disable AN timeout */
		if (hw->fc.strict_ieee)
			reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;

		IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
		DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
	}

	/*
	 * AUTOC restart handles negotiation of 1G and 10G on backplane
	 * and copper. There is no need to set the PCS1GCTL register.
	 *
	 */
	if (hw->phy.media_type == ixgbe_media_type_backplane) {
		reg_bp |= IXGBE_AUTOC_AN_RESTART;
		ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked);
		if (ret_val)
			goto out;
	} else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
		    (ixgbe_device_supports_autoneg_fc(hw))) {
		hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu);
	}

	DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
out:
	return ret_val;
}
386 
/**
 * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
 * @hw: pointer to hardware structure
 *
 * Starts the hardware by filling the bus info structure and media type, clears
 * all on chip counters, initializes receive address registers, multicast
 * table, VLAN filter table, calls routine to set up link and flow control
 * settings, and leaves transmit and receive units disabled and uninitialized.
 * Returns IXGBE_SUCCESS or the flow-control setup error.
 **/
s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
{
	s32 ret_val;
	u32 ctrl_ext;
	u16 device_caps;

	DEBUGFUNC("ixgbe_start_hw_generic");

	/* Set the media type */
	hw->phy.media_type = hw->mac.ops.get_media_type(hw);

	/* PHY ops initialization must be done in reset_hw() */

	/* Clear the VLAN filter table */
	hw->mac.ops.clear_vfta(hw);

	/* Clear statistics registers (clear-on-read counters) */
	hw->mac.ops.clear_hw_cntrs(hw);

	/* Set No Snoop Disable */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	IXGBE_WRITE_FLUSH(hw);

	/*
	 * Setup flow control; IXGBE_NOT_IMPLEMENTED is tolerated so MACs
	 * without an FC setup routine can still start.
	 */
	ret_val = ixgbe_setup_fc(hw);
	if (ret_val != IXGBE_SUCCESS && ret_val != IXGBE_NOT_IMPLEMENTED) {
		DEBUGOUT1("Flow control setup failed, returning %d\n", ret_val);
		return ret_val;
	}

	/* Cache bit indicating need for crosstalk fix */
	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* fix only needed when the device caps bit is absent */
		hw->mac.ops.get_device_caps(hw, &device_caps);
		if (device_caps & IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR)
			hw->need_crosstalk_fix = false;
		else
			hw->need_crosstalk_fix = true;
		break;
	default:
		hw->need_crosstalk_fix = false;
		break;
	}

	/* Clear adapter stopped flag */
	hw->adapter_stopped = false;

	return IXGBE_SUCCESS;
}
449 
450 /**
451  * ixgbe_start_hw_gen2 - Init sequence for common device family
452  * @hw: pointer to hw structure
453  *
454  * Performs the init sequence common to the second generation
455  * of 10 GbE devices.
456  * Devices in the second generation:
457  *    82599
458  *    X540
459  **/
460 void ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
461 {
462 	u32 i;
463 	u32 regval;
464 
465 	/* Clear the rate limiters */
466 	for (i = 0; i < hw->mac.max_tx_queues; i++) {
467 		IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
468 		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
469 	}
470 	IXGBE_WRITE_FLUSH(hw);
471 
472 	/* Disable relaxed ordering */
473 	for (i = 0; i < hw->mac.max_tx_queues; i++) {
474 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
475 		regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
476 		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
477 	}
478 
479 	for (i = 0; i < hw->mac.max_rx_queues; i++) {
480 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
481 		regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
482 			    IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
483 		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
484 	}
485 }
486 
487 /**
488  * ixgbe_init_hw_generic - Generic hardware initialization
489  * @hw: pointer to hardware structure
490  *
491  * Initialize the hardware by resetting the hardware, filling the bus info
492  * structure and media type, clears all on chip counters, initializes receive
493  * address registers, multicast table, VLAN filter table, calls routine to set
494  * up link and flow control settings, and leaves transmit and receive units
495  * disabled and uninitialized
496  **/
497 s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
498 {
499 	s32 status;
500 
501 	DEBUGFUNC("ixgbe_init_hw_generic");
502 
503 	/* Reset the hardware */
504 	status = hw->mac.ops.reset_hw(hw);
505 
506 	if (status == IXGBE_SUCCESS || status == IXGBE_ERR_SFP_NOT_PRESENT) {
507 		/* Start the HW */
508 		status = hw->mac.ops.start_hw(hw);
509 	}
510 
511 	/* Initialize the LED link active for LED blink support */
512 	if (hw->mac.ops.init_led_link_act)
513 		hw->mac.ops.init_led_link_act(hw);
514 
515 	if (status != IXGBE_SUCCESS)
516 		DEBUGOUT1("Failed to initialize HW, STATUS = %d\n", status);
517 
518 	return status;
519 }
520 
/**
 * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
 * @hw: pointer to hardware structure
 *
 * Clears all hardware statistics counters by reading them from the hardware
 * Statistics counters are clear on read.  Register sets differ by MAC
 * generation (82598 vs 82599-and-later), hence the mac.type branches.
 * Always returns IXGBE_SUCCESS.
 **/
s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
{
	u16 i = 0;

	DEBUGFUNC("ixgbe_clear_hw_cntrs_generic");

	/* Error counters */
	IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	IXGBE_READ_REG(hw, IXGBE_ERRBC);
	IXGBE_READ_REG(hw, IXGBE_MSPDC);
	for (i = 0; i < 8; i++)
		IXGBE_READ_REG(hw, IXGBE_MPC(i));

	IXGBE_READ_REG(hw, IXGBE_MLFC);
	IXGBE_READ_REG(hw, IXGBE_MRFC);
	IXGBE_READ_REG(hw, IXGBE_RLEC);
	/* Link-level flow control counters; Rx registers moved on 82599+ */
	IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	if (hw->mac.type >= ixgbe_mac_82599EB) {
		IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	} else {
		IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
	}

	/* Per-priority (8 TCs) flow control counters */
	for (i = 0; i < 8; i++) {
		IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
		IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
			IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
		} else {
			IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
			IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
		}
	}
	if (hw->mac.type >= ixgbe_mac_82599EB)
		for (i = 0; i < 8; i++)
			IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
	/* Rx packet-size histogram and good/broadcast/multicast counters */
	IXGBE_READ_REG(hw, IXGBE_PRC64);
	IXGBE_READ_REG(hw, IXGBE_PRC127);
	IXGBE_READ_REG(hw, IXGBE_PRC255);
	IXGBE_READ_REG(hw, IXGBE_PRC511);
	IXGBE_READ_REG(hw, IXGBE_PRC1023);
	IXGBE_READ_REG(hw, IXGBE_PRC1522);
	IXGBE_READ_REG(hw, IXGBE_GPRC);
	IXGBE_READ_REG(hw, IXGBE_BPRC);
	IXGBE_READ_REG(hw, IXGBE_MPRC);
	IXGBE_READ_REG(hw, IXGBE_GPTC);
	/* Good octet counters (low/high halves) */
	IXGBE_READ_REG(hw, IXGBE_GORCL);
	IXGBE_READ_REG(hw, IXGBE_GORCH);
	IXGBE_READ_REG(hw, IXGBE_GOTCL);
	IXGBE_READ_REG(hw, IXGBE_GOTCH);
	if (hw->mac.type == ixgbe_mac_82598EB)
		for (i = 0; i < 8; i++)
			IXGBE_READ_REG(hw, IXGBE_RNBC(i));
	IXGBE_READ_REG(hw, IXGBE_RUC);
	IXGBE_READ_REG(hw, IXGBE_RFC);
	IXGBE_READ_REG(hw, IXGBE_ROC);
	IXGBE_READ_REG(hw, IXGBE_RJC);
	/* Management traffic counters */
	IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	IXGBE_READ_REG(hw, IXGBE_TORL);
	IXGBE_READ_REG(hw, IXGBE_TORH);
	IXGBE_READ_REG(hw, IXGBE_TPR);
	IXGBE_READ_REG(hw, IXGBE_TPT);
	/* Tx packet-size histogram */
	IXGBE_READ_REG(hw, IXGBE_PTC64);
	IXGBE_READ_REG(hw, IXGBE_PTC127);
	IXGBE_READ_REG(hw, IXGBE_PTC255);
	IXGBE_READ_REG(hw, IXGBE_PTC511);
	IXGBE_READ_REG(hw, IXGBE_PTC1023);
	IXGBE_READ_REG(hw, IXGBE_PTC1522);
	IXGBE_READ_REG(hw, IXGBE_MPTC);
	IXGBE_READ_REG(hw, IXGBE_BPTC);
	/* Per-queue counters (16 queues) */
	for (i = 0; i < 16; i++) {
		IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
			IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		} else {
			IXGBE_READ_REG(hw, IXGBE_QBRC(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC(i));
		}
	}

	/*
	 * X540/X550 keep additional error counters in the PHY's PCS device;
	 * read them through MDIO (the value is discarded into i).
	 */
	if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) {
		if (hw->phy.id == 0)
			ixgbe_identify_phy(hw);
		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
		hw->phy.ops.read_reg(hw, IXGBE_LDPCECL,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
		hw->phy.ops.read_reg(hw, IXGBE_LDPCECH,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
	}

	return IXGBE_SUCCESS;
}
634 
/**
 * ixgbe_read_pba_string_generic - Reads part number string from EEPROM
 * @hw: pointer to hardware structure
 * @pba_num: stores the part number string from the EEPROM
 * @pba_num_size: part number string buffer length
 *
 * Reads the part number string from the EEPROM.  Two NVM layouts exist:
 * the new format, where word 0 holds a guard value and word 1 points to a
 * length-prefixed string section, and the legacy format, where the two
 * words themselves encode the PBA as hex digits ("XXXXXX-0XX").
 **/
s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
				  u32 pba_num_size)
{
	s32 ret_val;
	u16 data;
	u16 pba_ptr;
	u16 offset;
	u16 length;

	DEBUGFUNC("ixgbe_read_pba_string_generic");

	if (pba_num == NULL) {
		DEBUGOUT("PBA string buffer was null\n");
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		return ret_val;
	}

	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		return ret_val;
	}

	/*
	 * if data is not ptr guard the PBA must be in legacy format which
	 * means pba_ptr is actually our second data word for the PBA number
	 * and we can decode it into an ascii string
	 */
	if (data != IXGBE_PBANUM_PTR_GUARD) {
		DEBUGOUT("NVM PBA number is not stored as string\n");

		/* we will need 11 characters to store the PBA */
		if (pba_num_size < 11) {
			DEBUGOUT("PBA string buffer too small\n");
			return IXGBE_ERR_NO_SPACE;
		}

		/*
		 * extract hex string from data and pba_ptr; index 7 is a
		 * numeric 0 that the conversion loop below turns into the
		 * literal character '0' of the "XXXXXX-0XX" layout
		 */
		pba_num[0] = (data >> 12) & 0xF;
		pba_num[1] = (data >> 8) & 0xF;
		pba_num[2] = (data >> 4) & 0xF;
		pba_num[3] = data & 0xF;
		pba_num[4] = (pba_ptr >> 12) & 0xF;
		pba_num[5] = (pba_ptr >> 8) & 0xF;
		pba_num[6] = '-';
		pba_num[7] = 0;
		pba_num[8] = (pba_ptr >> 4) & 0xF;
		pba_num[9] = pba_ptr & 0xF;

		/* put a null character on the end of our string */
		pba_num[10] = '\0';

		/*
		 * switch all the data but the '-' to hex char; '-' (0x2D)
		 * falls outside both ranges and is left untouched
		 */
		for (offset = 0; offset < 10; offset++) {
			if (pba_num[offset] < 0xA)
				pba_num[offset] += '0';
			else if (pba_num[offset] < 0x10)
				pba_num[offset] += 'A' - 0xA;
		}

		return IXGBE_SUCCESS;
	}

	/* new format: word 1 points to a [length, data...] section */
	ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		return ret_val;
	}

	if (length == 0xFFFF || length == 0 || length > hw->eeprom.word_size) {
		DEBUGOUT("NVM PBA number section invalid length\n");
		return IXGBE_ERR_PBA_SECTION;
	}

	/* check if pba_num buffer is big enough (2 chars per word + NUL) */
	if (pba_num_size  < (((u32)length * 2) - 1)) {
		DEBUGOUT("PBA string buffer too small\n");
		return IXGBE_ERR_NO_SPACE;
	}

	/* trim pba length from start of string */
	pba_ptr++;
	length--;

	/* each EEPROM word holds two ASCII characters, high byte first */
	for (offset = 0; offset < length; offset++) {
		ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
		if (ret_val) {
			DEBUGOUT("NVM Read Error\n");
			return ret_val;
		}
		pba_num[offset * 2] = (u8)(data >> 8);
		pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
	}
	pba_num[offset * 2] = '\0';

	return IXGBE_SUCCESS;
}
745 
746 /**
747  * ixgbe_read_pba_num_generic - Reads part number from EEPROM
748  * @hw: pointer to hardware structure
749  * @pba_num: stores the part number from the EEPROM
750  *
751  * Reads the part number from the EEPROM.
752  **/
753 s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num)
754 {
755 	s32 ret_val;
756 	u16 data;
757 
758 	DEBUGFUNC("ixgbe_read_pba_num_generic");
759 
760 	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
761 	if (ret_val) {
762 		DEBUGOUT("NVM Read Error\n");
763 		return ret_val;
764 	} else if (data == IXGBE_PBANUM_PTR_GUARD) {
765 		DEBUGOUT("NVM Not supported\n");
766 		return IXGBE_NOT_IMPLEMENTED;
767 	}
768 	*pba_num = (u32)(data << 16);
769 
770 	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data);
771 	if (ret_val) {
772 		DEBUGOUT("NVM Read Error\n");
773 		return ret_val;
774 	}
775 	*pba_num |= (u32)data;
776 
777 	return IXGBE_SUCCESS;
778 }
779 
780 /**
781  * ixgbe_read_pba_raw
782  * @hw: pointer to the HW structure
783  * @eeprom_buf: optional pointer to EEPROM image
784  * @eeprom_buf_size: size of EEPROM image in words
785  * @max_pba_block_size: PBA block size limit
786  * @pba: pointer to output PBA structure
787  *
788  * Reads PBA from EEPROM image when eeprom_buf is not NULL.
789  * Reads PBA from physical EEPROM device when eeprom_buf is NULL.
790  *
791  **/
792 s32 ixgbe_read_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
793 		       u32 eeprom_buf_size, u16 max_pba_block_size,
794 		       struct ixgbe_pba *pba)
795 {
796 	s32 ret_val;
797 	u16 pba_block_size;
798 
799 	if (pba == NULL)
800 		return IXGBE_ERR_PARAM;
801 
802 	if (eeprom_buf == NULL) {
803 		ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
804 						     &pba->word[0]);
805 		if (ret_val)
806 			return ret_val;
807 	} else {
808 		if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
809 			pba->word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
810 			pba->word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
811 		} else {
812 			return IXGBE_ERR_PARAM;
813 		}
814 	}
815 
816 	if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
817 		if (pba->pba_block == NULL)
818 			return IXGBE_ERR_PARAM;
819 
820 		ret_val = ixgbe_get_pba_block_size(hw, eeprom_buf,
821 						   eeprom_buf_size,
822 						   &pba_block_size);
823 		if (ret_val)
824 			return ret_val;
825 
826 		if (pba_block_size > max_pba_block_size)
827 			return IXGBE_ERR_PARAM;
828 
829 		if (eeprom_buf == NULL) {
830 			ret_val = hw->eeprom.ops.read_buffer(hw, pba->word[1],
831 							     pba_block_size,
832 							     pba->pba_block);
833 			if (ret_val)
834 				return ret_val;
835 		} else {
836 			if (eeprom_buf_size > (u32)(pba->word[1] +
837 					      pba_block_size)) {
838 				memcpy(pba->pba_block,
839 				       &eeprom_buf[pba->word[1]],
840 				       pba_block_size * sizeof(u16));
841 			} else {
842 				return IXGBE_ERR_PARAM;
843 			}
844 		}
845 	}
846 
847 	return IXGBE_SUCCESS;
848 }
849 
850 /**
851  * ixgbe_write_pba_raw
852  * @hw: pointer to the HW structure
853  * @eeprom_buf: optional pointer to EEPROM image
854  * @eeprom_buf_size: size of EEPROM image in words
855  * @pba: pointer to PBA structure
856  *
857  * Writes PBA to EEPROM image when eeprom_buf is not NULL.
858  * Writes PBA to physical EEPROM device when eeprom_buf is NULL.
859  *
860  **/
861 s32 ixgbe_write_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
862 			u32 eeprom_buf_size, struct ixgbe_pba *pba)
863 {
864 	s32 ret_val;
865 
866 	if (pba == NULL)
867 		return IXGBE_ERR_PARAM;
868 
869 	if (eeprom_buf == NULL) {
870 		ret_val = hw->eeprom.ops.write_buffer(hw, IXGBE_PBANUM0_PTR, 2,
871 						      &pba->word[0]);
872 		if (ret_val)
873 			return ret_val;
874 	} else {
875 		if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
876 			eeprom_buf[IXGBE_PBANUM0_PTR] = pba->word[0];
877 			eeprom_buf[IXGBE_PBANUM1_PTR] = pba->word[1];
878 		} else {
879 			return IXGBE_ERR_PARAM;
880 		}
881 	}
882 
883 	if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
884 		if (pba->pba_block == NULL)
885 			return IXGBE_ERR_PARAM;
886 
887 		if (eeprom_buf == NULL) {
888 			ret_val = hw->eeprom.ops.write_buffer(hw, pba->word[1],
889 							      pba->pba_block[0],
890 							      pba->pba_block);
891 			if (ret_val)
892 				return ret_val;
893 		} else {
894 			if (eeprom_buf_size > (u32)(pba->word[1] +
895 					      pba->pba_block[0])) {
896 				memcpy(&eeprom_buf[pba->word[1]],
897 				       pba->pba_block,
898 				       pba->pba_block[0] * sizeof(u16));
899 			} else {
900 				return IXGBE_ERR_PARAM;
901 			}
902 		}
903 	}
904 
905 	return IXGBE_SUCCESS;
906 }
907 
908 /**
909  * ixgbe_get_pba_block_size
910  * @hw: pointer to the HW structure
911  * @eeprom_buf: optional pointer to EEPROM image
912  * @eeprom_buf_size: size of EEPROM image in words
913  * @pba_data_size: pointer to output variable
914  *
915  * Returns the size of the PBA block in words. Function operates on EEPROM
916  * image if the eeprom_buf pointer is not NULL otherwise it accesses physical
917  * EEPROM device.
918  *
919  **/
920 s32 ixgbe_get_pba_block_size(struct ixgbe_hw *hw, u16 *eeprom_buf,
921 			     u32 eeprom_buf_size, u16 *pba_block_size)
922 {
923 	s32 ret_val;
924 	u16 pba_word[2];
925 	u16 length;
926 
927 	DEBUGFUNC("ixgbe_get_pba_block_size");
928 
929 	if (eeprom_buf == NULL) {
930 		ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
931 						     &pba_word[0]);
932 		if (ret_val)
933 			return ret_val;
934 	} else {
935 		if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
936 			pba_word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
937 			pba_word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
938 		} else {
939 			return IXGBE_ERR_PARAM;
940 		}
941 	}
942 
943 	if (pba_word[0] == IXGBE_PBANUM_PTR_GUARD) {
944 		if (eeprom_buf == NULL) {
945 			ret_val = hw->eeprom.ops.read(hw, pba_word[1] + 0,
946 						      &length);
947 			if (ret_val)
948 				return ret_val;
949 		} else {
950 			if (eeprom_buf_size > pba_word[1])
951 				length = eeprom_buf[pba_word[1] + 0];
952 			else
953 				return IXGBE_ERR_PARAM;
954 		}
955 
956 		if (length == 0xFFFF || length == 0)
957 			return IXGBE_ERR_PBA_SECTION;
958 	} else {
959 		/* PBA number in legacy format, there is no PBA Block. */
960 		length = 0;
961 	}
962 
963 	if (pba_block_size != NULL)
964 		*pba_block_size = length;
965 
966 	return IXGBE_SUCCESS;
967 }
968 
969 /**
970  * ixgbe_get_mac_addr_generic - Generic get MAC address
971  * @hw: pointer to hardware structure
972  * @mac_addr: Adapter MAC address
973  *
974  * Reads the adapter's MAC address from first Receive Address Register (RAR0)
975  * A reset of the adapter must be performed prior to calling this function
976  * in order for the MAC address to have been loaded from the EEPROM into RAR0
977  **/
978 s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
979 {
980 	u32 rar_high;
981 	u32 rar_low;
982 	u16 i;
983 
984 	DEBUGFUNC("ixgbe_get_mac_addr_generic");
985 
986 	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
987 	rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));
988 
989 	for (i = 0; i < 4; i++)
990 		mac_addr[i] = (u8)(rar_low >> (i*8));
991 
992 	for (i = 0; i < 2; i++)
993 		mac_addr[i+4] = (u8)(rar_high >> (i*8));
994 
995 	return IXGBE_SUCCESS;
996 }
997 
998 /**
999  * ixgbe_set_pci_config_data_generic - Generic store PCI bus info
1000  * @hw: pointer to hardware structure
1001  * @link_status: the link status returned by the PCI config space
1002  *
1003  * Stores the PCI bus info (speed, width, type) within the ixgbe_hw structure
1004  **/
1005 void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw, u16 link_status)
1006 {
1007 	struct ixgbe_mac_info *mac = &hw->mac;
1008 
1009 	if (hw->bus.type == ixgbe_bus_type_unknown)
1010 		hw->bus.type = ixgbe_bus_type_pci_express;
1011 
1012 	switch (link_status & IXGBE_PCI_LINK_WIDTH) {
1013 	case IXGBE_PCI_LINK_WIDTH_1:
1014 		hw->bus.width = ixgbe_bus_width_pcie_x1;
1015 		break;
1016 	case IXGBE_PCI_LINK_WIDTH_2:
1017 		hw->bus.width = ixgbe_bus_width_pcie_x2;
1018 		break;
1019 	case IXGBE_PCI_LINK_WIDTH_4:
1020 		hw->bus.width = ixgbe_bus_width_pcie_x4;
1021 		break;
1022 	case IXGBE_PCI_LINK_WIDTH_8:
1023 		hw->bus.width = ixgbe_bus_width_pcie_x8;
1024 		break;
1025 	default:
1026 		hw->bus.width = ixgbe_bus_width_unknown;
1027 		break;
1028 	}
1029 
1030 	switch (link_status & IXGBE_PCI_LINK_SPEED) {
1031 	case IXGBE_PCI_LINK_SPEED_2500:
1032 		hw->bus.speed = ixgbe_bus_speed_2500;
1033 		break;
1034 	case IXGBE_PCI_LINK_SPEED_5000:
1035 		hw->bus.speed = ixgbe_bus_speed_5000;
1036 		break;
1037 	case IXGBE_PCI_LINK_SPEED_8000:
1038 		hw->bus.speed = ixgbe_bus_speed_8000;
1039 		break;
1040 	default:
1041 		hw->bus.speed = ixgbe_bus_speed_unknown;
1042 		break;
1043 	}
1044 
1045 	mac->ops.set_lan_id(hw);
1046 }
1047 
1048 /**
1049  * ixgbe_get_bus_info_generic - Generic set PCI bus info
1050  * @hw: pointer to hardware structure
1051  *
1052  * Gets the PCI bus info (speed, width, type) then calls helper function to
1053  * store this data within the ixgbe_hw structure.
1054  **/
1055 s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
1056 {
1057 	u16 link_status;
1058 
1059 	DEBUGFUNC("ixgbe_get_bus_info_generic");
1060 
1061 	/* Get the negotiated link width and speed from PCI config space */
1062 	link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);
1063 
1064 	ixgbe_set_pci_config_data_generic(hw, link_status);
1065 
1066 	return IXGBE_SUCCESS;
1067 }
1068 
1069 /**
1070  * ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
1071  * @hw: pointer to the HW structure
1072  *
1073  * Determines the LAN function id by reading memory-mapped registers and swaps
1074  * the port value if requested, and set MAC instance for devices that share
1075  * CS4227.
1076  **/
1077 void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
1078 {
1079 	struct ixgbe_bus_info *bus = &hw->bus;
1080 	u32 reg;
1081 	u16 ee_ctrl_4;
1082 
1083 	DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie");
1084 
1085 	reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
1086 	bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
1087 	bus->lan_id = (u8)bus->func;
1088 
1089 	/* check for a port swap */
1090 	reg = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw));
1091 	if (reg & IXGBE_FACTPS_LFS)
1092 		bus->func ^= 0x1;
1093 
1094 	/* Get MAC instance from EEPROM for configuring CS4227 */
1095 	if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP) {
1096 		hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_4, &ee_ctrl_4);
1097 		bus->instance_id = (ee_ctrl_4 & IXGBE_EE_CTRL_4_INST_ID) >>
1098 				   IXGBE_EE_CTRL_4_INST_ID_SHIFT;
1099 	}
1100 }
1101 
1102 /**
1103  * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
1104  * @hw: pointer to hardware structure
1105  *
1106  * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
1107  * disables transmit and receive units. The adapter_stopped flag is used by
1108  * the shared code and drivers to determine if the adapter is in a stopped
1109  * state and should not touch the hardware.
1110  **/
s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
{
	u32 reg_val;
	u16 i;

	DEBUGFUNC("ixgbe_stop_adapter_generic");

	/*
	 * Set the adapter_stopped flag so other driver functions stop touching
	 * the hardware
	 */
	hw->adapter_stopped = true;

	/* Disable the receive unit */
	ixgbe_disable_rx(hw);

	/* Clear interrupt mask to stop interrupts from being generated */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);

	/* Clear any pending interrupts, flush previous writes */
	IXGBE_READ_REG(hw, IXGBE_EICR);

	/* Disable the transmit unit.  Each queue must be disabled. */
	/* TXDCTL is written with only SWFLSH set, so ENABLE is cleared too. */
	for (i = 0; i < hw->mac.max_tx_queues; i++)
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH);

	/* Disable the receive unit by stopping each queue */
	/* Unlike Tx, Rx preserves the other RXDCTL bits via read-modify-write. */
	for (i = 0; i < hw->mac.max_rx_queues; i++) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
		reg_val &= ~IXGBE_RXDCTL_ENABLE;
		reg_val |= IXGBE_RXDCTL_SWFLSH;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
	}

	/* flush all queues disables */
	IXGBE_WRITE_FLUSH(hw);
	/* 2 ms settle time after the flush before disabling primary access */
	msec_delay(2);

	/*
	 * Prevent the PCI-E bus from hanging by disabling PCI-E primary
	 * access and verify no pending requests
	 */
	return ixgbe_disable_pcie_primary(hw);
}
1155 
1156 /**
1157  * ixgbe_init_led_link_act_generic - Store the LED index link/activity.
1158  * @hw: pointer to hardware structure
1159  *
1160  * Store the index for the link active LED. This will be used to support
1161  * blinking the LED.
1162  **/
1163 s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw)
1164 {
1165 	struct ixgbe_mac_info *mac = &hw->mac;
1166 	u32 led_reg, led_mode;
1167 	u8 i;
1168 
1169 	led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1170 
1171 	/* Get LED link active from the LEDCTL register */
1172 	for (i = 0; i < 4; i++) {
1173 		led_mode = led_reg >> IXGBE_LED_MODE_SHIFT(i);
1174 
1175 		if ((led_mode & IXGBE_LED_MODE_MASK_BASE) ==
1176 		     IXGBE_LED_LINK_ACTIVE) {
1177 			mac->led_link_act = i;
1178 			return IXGBE_SUCCESS;
1179 		}
1180 	}
1181 
1182 	/*
1183 	 * If LEDCTL register does not have the LED link active set, then use
1184 	 * known MAC defaults.
1185 	 */
1186 	switch (hw->mac.type) {
1187 	case ixgbe_mac_X550EM_a:
1188 	case ixgbe_mac_X550EM_x:
1189 		mac->led_link_act = 1;
1190 		break;
1191 	default:
1192 		mac->led_link_act = 2;
1193 	}
1194 	return IXGBE_SUCCESS;
1195 }
1196 
1197 /**
1198  * ixgbe_led_on_generic - Turns on the software controllable LEDs.
1199  * @hw: pointer to hardware structure
1200  * @index: led number to turn on
1201  **/
1202 s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
1203 {
1204 	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1205 
1206 	DEBUGFUNC("ixgbe_led_on_generic");
1207 
1208 	if (index > 3)
1209 		return IXGBE_ERR_PARAM;
1210 
1211 	/* To turn on the LED, set mode to ON. */
1212 	led_reg &= ~IXGBE_LED_MODE_MASK(index);
1213 	led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
1214 	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
1215 	IXGBE_WRITE_FLUSH(hw);
1216 
1217 	return IXGBE_SUCCESS;
1218 }
1219 
1220 /**
1221  * ixgbe_led_off_generic - Turns off the software controllable LEDs.
1222  * @hw: pointer to hardware structure
1223  * @index: led number to turn off
1224  **/
1225 s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
1226 {
1227 	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1228 
1229 	DEBUGFUNC("ixgbe_led_off_generic");
1230 
1231 	if (index > 3)
1232 		return IXGBE_ERR_PARAM;
1233 
1234 	/* To turn off the LED, set mode to OFF. */
1235 	led_reg &= ~IXGBE_LED_MODE_MASK(index);
1236 	led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
1237 	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
1238 	IXGBE_WRITE_FLUSH(hw);
1239 
1240 	return IXGBE_SUCCESS;
1241 }
1242 
1243 /**
1244  * ixgbe_init_eeprom_params_generic - Initialize EEPROM params
1245  * @hw: pointer to hardware structure
1246  *
1247  * Initializes the EEPROM parameters ixgbe_eeprom_info within the
1248  * ixgbe_hw struct in order to set up EEPROM access.
1249  **/
s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
{
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	u32 eec;
	u16 eeprom_size;

	DEBUGFUNC("ixgbe_init_eeprom_params_generic");

	/* Runs only once; later calls keep the already-derived parameters. */
	if (eeprom->type == ixgbe_eeprom_uninitialized) {
		eeprom->type = ixgbe_eeprom_none;
		/* Set default semaphore delay to 10ms which is a well
		 * tested value */
		eeprom->semaphore_delay = 10;
		/* Clear EEPROM page size, it will be initialized as needed */
		eeprom->word_page_size = 0;

		/*
		 * Check for EEPROM present first.
		 * If not present leave as none
		 */
		eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
		if (eec & IXGBE_EEC_PRES) {
			eeprom->type = ixgbe_eeprom_spi;

			/*
			 * SPI EEPROM is assumed here.  This code would need to
			 * change if a future EEPROM is not SPI.
			 */
			/* word_size = 2^(size-field + base shift) words */
			eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
					    IXGBE_EEC_SIZE_SHIFT);
			eeprom->word_size = 1 << (eeprom_size +
					     IXGBE_EEPROM_WORD_SIZE_SHIFT);
		}

		/* Address width (8 or 16 bits) as reported by EEC */
		if (eec & IXGBE_EEC_ADDR_SIZE)
			eeprom->address_bits = 16;
		else
			eeprom->address_bits = 8;
		DEBUGOUT3("Eeprom params: type = %d, size = %d, address bits: "
			  "%d\n", eeprom->type, eeprom->word_size,
			  eeprom->address_bits);
	}

	return IXGBE_SUCCESS;
}
1295 
1296 /**
1297  * ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang
1298  * @hw: pointer to hardware structure
1299  * @offset: offset within the EEPROM to write
1300  * @words: number of word(s)
1301  * @data: 16 bit word(s) to write to EEPROM
1302  *
1303  * Reads 16 bit word(s) from EEPROM through bit-bang method
1304  **/
1305 s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1306 					       u16 words, u16 *data)
1307 {
1308 	s32 status = IXGBE_SUCCESS;
1309 	u16 i, count;
1310 
1311 	DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang_generic");
1312 
1313 	hw->eeprom.ops.init_params(hw);
1314 
1315 	if (words == 0) {
1316 		status = IXGBE_ERR_INVALID_ARGUMENT;
1317 		goto out;
1318 	}
1319 
1320 	if (offset + words > hw->eeprom.word_size) {
1321 		status = IXGBE_ERR_EEPROM;
1322 		goto out;
1323 	}
1324 
1325 	/*
1326 	 * The EEPROM page size cannot be queried from the chip. We do lazy
1327 	 * initialization. It is worth to do that when we write large buffer.
1328 	 */
1329 	if ((hw->eeprom.word_page_size == 0) &&
1330 	    (words > IXGBE_EEPROM_PAGE_SIZE_MAX))
1331 		ixgbe_detect_eeprom_page_size_generic(hw, offset);
1332 
1333 	/*
1334 	 * We cannot hold synchronization semaphores for too long
1335 	 * to avoid other entity starvation. However it is more efficient
1336 	 * to read in bursts than synchronizing access for each word.
1337 	 */
1338 	for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
1339 		count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
1340 			IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
1341 		status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i,
1342 							    count, &data[i]);
1343 
1344 		if (status != IXGBE_SUCCESS)
1345 			break;
1346 	}
1347 
1348 out:
1349 	return status;
1350 }
1351 
1352 /**
1353  * ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM
1354  * @hw: pointer to hardware structure
1355  * @offset: offset within the EEPROM to be written to
1356  * @words: number of word(s)
1357  * @data: 16 bit word(s) to be written to the EEPROM
1358  *
1359  * If ixgbe_eeprom_update_checksum is not called after this function, the
1360  * EEPROM will most likely contain an invalid checksum.
1361  **/
static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
					      u16 words, u16 *data)
{
	s32 status;
	u16 word;
	u16 page_size;
	u16 i;
	u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;

	DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang");

	/* Prepare the EEPROM for writing  */
	status = ixgbe_acquire_eeprom(hw);

	if (status == IXGBE_SUCCESS) {
		if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
			ixgbe_release_eeprom(hw);
			status = IXGBE_ERR_EEPROM;
		}
	}

	if (status == IXGBE_SUCCESS) {
		for (i = 0; i < words; i++) {
			ixgbe_standby_eeprom(hw);

			/*  Send the WRITE ENABLE command (8 bit opcode )  */
			ixgbe_shift_out_eeprom_bits(hw,
						   IXGBE_EEPROM_WREN_OPCODE_SPI,
						   IXGBE_EEPROM_OPCODE_BITS);

			ixgbe_standby_eeprom(hw);

			/*
			 * Some SPI eeproms use the 8th address bit embedded
			 * in the opcode
			 */
			/* Once set, the A8 bit stays set across iterations;
			 * offsets only grow within this loop, so that is safe. */
			if ((hw->eeprom.address_bits == 8) &&
			    ((offset + i) >= 128))
				write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;

			/* Send the Write command (8-bit opcode + addr) */
			/* Word offset is converted to a byte address (* 2). */
			ixgbe_shift_out_eeprom_bits(hw, write_opcode,
						    IXGBE_EEPROM_OPCODE_BITS);
			ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
						    hw->eeprom.address_bits);

			page_size = hw->eeprom.word_page_size;

			/* Send the data in burst via SPI*/
			do {
				word = data[i];
				/* Byte-swap before shifting out; the read
				 * path applies the mirror-image swap. */
				word = (word >> 8) | (word << 8);
				ixgbe_shift_out_eeprom_bits(hw, word, 16);

				/* Unknown page size: one word per command. */
				if (page_size == 0)
					break;

				/* do not wrap around page */
				if (((offset + i) & (page_size - 1)) ==
				    (page_size - 1))
					break;
			} while (++i < words);

			/* 10 ms to let the device commit the page write. */
			ixgbe_standby_eeprom(hw);
			msec_delay(10);
		}
		/* Done with writing - release the EEPROM */
		ixgbe_release_eeprom(hw);
	}

	return status;
}
1434 
1435 /**
1436  * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
1437  * @hw: pointer to hardware structure
1438  * @offset: offset within the EEPROM to be written to
1439  * @data: 16 bit word to be written to the EEPROM
1440  *
1441  * If ixgbe_eeprom_update_checksum is not called after this function, the
1442  * EEPROM will most likely contain an invalid checksum.
1443  **/
1444 s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
1445 {
1446 	s32 status;
1447 
1448 	DEBUGFUNC("ixgbe_write_eeprom_generic");
1449 
1450 	hw->eeprom.ops.init_params(hw);
1451 
1452 	if (offset >= hw->eeprom.word_size) {
1453 		status = IXGBE_ERR_EEPROM;
1454 		goto out;
1455 	}
1456 
1457 	status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
1458 
1459 out:
1460 	return status;
1461 }
1462 
1463 /**
1464  * ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang
1465  * @hw: pointer to hardware structure
1466  * @offset: offset within the EEPROM to be read
1467  * @data: read 16 bit words(s) from EEPROM
1468  * @words: number of word(s)
1469  *
1470  * Reads 16 bit word(s) from EEPROM through bit-bang method
1471  **/
1472 s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1473 					      u16 words, u16 *data)
1474 {
1475 	s32 status = IXGBE_SUCCESS;
1476 	u16 i, count;
1477 
1478 	DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang_generic");
1479 
1480 	hw->eeprom.ops.init_params(hw);
1481 
1482 	if (words == 0) {
1483 		status = IXGBE_ERR_INVALID_ARGUMENT;
1484 		goto out;
1485 	}
1486 
1487 	if (offset + words > hw->eeprom.word_size) {
1488 		status = IXGBE_ERR_EEPROM;
1489 		goto out;
1490 	}
1491 
1492 	/*
1493 	 * We cannot hold synchronization semaphores for too long
1494 	 * to avoid other entity starvation. However it is more efficient
1495 	 * to read in bursts than synchronizing access for each word.
1496 	 */
1497 	for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
1498 		count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
1499 			IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
1500 
1501 		status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i,
1502 							   count, &data[i]);
1503 
1504 		if (status != IXGBE_SUCCESS)
1505 			break;
1506 	}
1507 
1508 out:
1509 	return status;
1510 }
1511 
1512 /**
1513  * ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang
1514  * @hw: pointer to hardware structure
1515  * @offset: offset within the EEPROM to be read
1516  * @words: number of word(s)
1517  * @data: read 16 bit word(s) from EEPROM
1518  *
1519  * Reads 16 bit word(s) from EEPROM through bit-bang method
1520  **/
static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
					     u16 words, u16 *data)
{
	s32 status;
	u16 word_in;
	u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
	u16 i;

	DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang");

	/* Prepare the EEPROM for reading  */
	status = ixgbe_acquire_eeprom(hw);

	if (status == IXGBE_SUCCESS) {
		if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
			ixgbe_release_eeprom(hw);
			status = IXGBE_ERR_EEPROM;
		}
	}

	if (status == IXGBE_SUCCESS) {
		for (i = 0; i < words; i++) {
			ixgbe_standby_eeprom(hw);
			/*
			 * Some SPI eeproms use the 8th address bit embedded
			 * in the opcode
			 */
			/* Once set, the A8 bit stays set; offsets only grow
			 * within this loop, so that is safe. */
			if ((hw->eeprom.address_bits == 8) &&
			    ((offset + i) >= 128))
				read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;

			/* Send the READ command (opcode + addr) */
			/* Word offset is converted to a byte address (* 2). */
			ixgbe_shift_out_eeprom_bits(hw, read_opcode,
						    IXGBE_EEPROM_OPCODE_BITS);
			ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
						    hw->eeprom.address_bits);

			/* Read the data. */
			/* Byte-swap the word shifted in from the device;
			 * mirrors the swap done in the write path. */
			word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
			data[i] = (word_in >> 8) | (word_in << 8);
		}

		/* End this read operation */
		ixgbe_release_eeprom(hw);
	}

	return status;
}
1569 
1570 /**
1571  * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
1572  * @hw: pointer to hardware structure
1573  * @offset: offset within the EEPROM to be read
1574  * @data: read 16 bit value from EEPROM
1575  *
1576  * Reads 16 bit value from EEPROM through bit-bang method
1577  **/
1578 s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1579 				       u16 *data)
1580 {
1581 	s32 status;
1582 
1583 	DEBUGFUNC("ixgbe_read_eeprom_bit_bang_generic");
1584 
1585 	hw->eeprom.ops.init_params(hw);
1586 
1587 	if (offset >= hw->eeprom.word_size) {
1588 		status = IXGBE_ERR_EEPROM;
1589 		goto out;
1590 	}
1591 
1592 	status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1593 
1594 out:
1595 	return status;
1596 }
1597 
1598 /**
1599  * ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD
1600  * @hw: pointer to hardware structure
1601  * @offset: offset of word in the EEPROM to read
1602  * @words: number of word(s)
1603  * @data: 16 bit word(s) from the EEPROM
1604  *
1605  * Reads a 16 bit word(s) from the EEPROM using the EERD register.
1606  **/
s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
				   u16 words, u16 *data)
{
	u32 eerd;
	s32 status = IXGBE_SUCCESS;
	u32 i;

	DEBUGFUNC("ixgbe_read_eerd_buffer_generic");

	hw->eeprom.ops.init_params(hw);

	if (words == 0) {
		status = IXGBE_ERR_INVALID_ARGUMENT;
		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
		goto out;
	}

	/*
	 * NOTE(review): only the starting offset is validated here, not
	 * offset + words, unlike the bit-bang buffer paths -- confirm this
	 * matches the intended contract.
	 */
	if (offset >= hw->eeprom.word_size) {
		status = IXGBE_ERR_EEPROM;
		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
		goto out;
	}

	for (i = 0; i < words; i++) {
		/* Pack the word address and the START bit into EERD. */
		eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
		       IXGBE_EEPROM_RW_REG_START;

		IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
		/* Poll until the DONE bit is set, then latch the data. */
		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);

		if (status == IXGBE_SUCCESS) {
			data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
				   IXGBE_EEPROM_RW_REG_DATA);
		} else {
			DEBUGOUT("Eeprom read timed out\n");
			goto out;
		}
	}
out:
	return status;
}
1648 
1649 /**
1650  * ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size
1651  * @hw: pointer to hardware structure
1652  * @offset: offset within the EEPROM to be used as a scratch pad
1653  *
1654  * Discover EEPROM page size by writing marching data at given offset.
1655  * This function is called only when we are writing a new large buffer
1656  * at given offset so the data would be overwritten anyway.
1657  **/
static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
						 u16 offset)
{
	u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
	s32 status = IXGBE_SUCCESS;
	u16 i;

	DEBUGFUNC("ixgbe_detect_eeprom_page_size_generic");

	/* Marching pattern: each word holds its own index. */
	for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
		data[i] = i;

	/*
	 * Write the pattern assuming the maximum page size, then restore
	 * word_page_size to "unknown" (0) before probing the result.
	 */
	hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX;
	status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset,
					     IXGBE_EEPROM_PAGE_SIZE_MAX, data);
	hw->eeprom.word_page_size = 0;
	if (status != IXGBE_SUCCESS)
		goto out;

	/* Read back the first word of the scratch area. */
	status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
	if (status != IXGBE_SUCCESS)
		goto out;

	/*
	 * When writing in burst more than the actual page size
	 * EEPROM address wraps around current page.
	 */
	/* data[0] holds the index of the word that wrapped onto offset. */
	hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];

	DEBUGOUT1("Detected EEPROM page size = %d words.",
		  hw->eeprom.word_page_size);
out:
	return status;
}
1692 
1693 /**
1694  * ixgbe_read_eerd_generic - Read EEPROM word using EERD
1695  * @hw: pointer to hardware structure
1696  * @offset: offset of  word in the EEPROM to read
1697  * @data: word read from the EEPROM
1698  *
1699  * Reads a 16 bit word from the EEPROM using the EERD register.
1700  **/
1701 s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
1702 {
1703 	return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
1704 }
1705 
1706 /**
1707  * ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR
1708  * @hw: pointer to hardware structure
1709  * @offset: offset of  word in the EEPROM to write
1710  * @words: number of word(s)
1711  * @data: word(s) write to the EEPROM
1712  *
1713  * Write a 16 bit word(s) to the EEPROM using the EEWR register.
1714  **/
1715 s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1716 				    u16 words, u16 *data)
1717 {
1718 	u32 eewr;
1719 	s32 status = IXGBE_SUCCESS;
1720 	u16 i;
1721 
1722 	DEBUGFUNC("ixgbe_write_eewr_generic");
1723 
1724 	hw->eeprom.ops.init_params(hw);
1725 
1726 	if (words == 0) {
1727 		status = IXGBE_ERR_INVALID_ARGUMENT;
1728 		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
1729 		goto out;
1730 	}
1731 
1732 	if (offset >= hw->eeprom.word_size) {
1733 		status = IXGBE_ERR_EEPROM;
1734 		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
1735 		goto out;
1736 	}
1737 
1738 	for (i = 0; i < words; i++) {
1739 		eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1740 			(data[i] << IXGBE_EEPROM_RW_REG_DATA) |
1741 			IXGBE_EEPROM_RW_REG_START;
1742 
1743 		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1744 		if (status != IXGBE_SUCCESS) {
1745 			DEBUGOUT("Eeprom write EEWR timed out\n");
1746 			goto out;
1747 		}
1748 
1749 		IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
1750 
1751 		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1752 		if (status != IXGBE_SUCCESS) {
1753 			DEBUGOUT("Eeprom write EEWR timed out\n");
1754 			goto out;
1755 		}
1756 	}
1757 
1758 out:
1759 	return status;
1760 }
1761 
1762 /**
1763  * ixgbe_write_eewr_generic - Write EEPROM word using EEWR
1764  * @hw: pointer to hardware structure
1765  * @offset: offset of  word in the EEPROM to write
1766  * @data: word write to the EEPROM
1767  *
1768  * Write a 16 bit word to the EEPROM using the EEWR register.
1769  **/
1770 s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
1771 {
1772 	return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data);
1773 }
1774 
1775 /**
1776  * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
1777  * @hw: pointer to hardware structure
1778  * @ee_reg: EEPROM flag for polling
1779  *
1780  * Polls the status bit (bit 1) of the EERD or EEWR to determine when the
1781  * read or write is done respectively.
1782  **/
1783 s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
1784 {
1785 	u32 i;
1786 	u32 reg;
1787 	s32 status = IXGBE_ERR_EEPROM;
1788 
1789 	DEBUGFUNC("ixgbe_poll_eerd_eewr_done");
1790 
1791 	for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
1792 		if (ee_reg == IXGBE_NVM_POLL_READ)
1793 			reg = IXGBE_READ_REG(hw, IXGBE_EERD);
1794 		else
1795 			reg = IXGBE_READ_REG(hw, IXGBE_EEWR);
1796 
1797 		if (reg & IXGBE_EEPROM_RW_REG_DONE) {
1798 			status = IXGBE_SUCCESS;
1799 			break;
1800 		}
1801 		usec_delay(5);
1802 	}
1803 
1804 	if (i == IXGBE_EERD_EEWR_ATTEMPTS)
1805 		ERROR_REPORT1(IXGBE_ERROR_POLLING,
1806 			     "EEPROM read/write done polling timed out");
1807 
1808 	return status;
1809 }
1810 
1811 /**
1812  * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
1813  * @hw: pointer to hardware structure
1814  *
1815  * Prepares EEPROM for access using bit-bang method. This function should
1816  * be called before issuing a command to the EEPROM.
1817  **/
static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_SUCCESS;
	u32 eec;
	u32 i;

	DEBUGFUNC("ixgbe_acquire_eeprom");

	/* Take the SW/FW semaphore before touching the EEC register. */
	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)
	    != IXGBE_SUCCESS)
		status = IXGBE_ERR_SWFW_SYNC;

	if (status == IXGBE_SUCCESS) {
		eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));

		/* Request EEPROM Access */
		eec |= IXGBE_EEC_REQ;
		IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);

		/* Poll for hardware to set the grant bit. */
		for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
			eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
			if (eec & IXGBE_EEC_GNT)
				break;
			usec_delay(5);
		}

		/* Release if grant not acquired */
		if (!(eec & IXGBE_EEC_GNT)) {
			eec &= ~IXGBE_EEC_REQ;
			IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
			DEBUGOUT("Could not acquire EEPROM grant\n");

			/* Drop the semaphore taken above on failure. */
			hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
			status = IXGBE_ERR_EEPROM;
		}

		/* Setup EEPROM for Read/Write */
		if (status == IXGBE_SUCCESS) {
			/* Clear CS and SK */
			eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
			IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
			IXGBE_WRITE_FLUSH(hw);
			usec_delay(1);
		}
	}
	return status;
}
1865 
1866 /**
1867  * ixgbe_get_eeprom_semaphore - Get hardware semaphore
1868  * @hw: pointer to hardware structure
1869  *
1870  * Sets the hardware semaphores so EEPROM access can occur for bit-bang method
1871  **/
static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_ERR_EEPROM;
	u32 timeout = 2000;	/* 2000 x 50 us = ~100 ms per polling stage */
	u32 i;
	u32 swsm;

	DEBUGFUNC("ixgbe_get_eeprom_semaphore");


	/* Get SMBI software semaphore between device drivers first */
	for (i = 0; i < timeout; i++) {
		/*
		 * If the SMBI bit is 0 when we read it, then the bit will be
		 * set and we have the semaphore
		 */
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
		if (!(swsm & IXGBE_SWSM_SMBI)) {
			status = IXGBE_SUCCESS;
			break;
		}
		usec_delay(50);
	}

	if (i == timeout) {
		DEBUGOUT("Driver can't access the Eeprom - SMBI Semaphore "
			 "not granted.\n");
		/*
		 * this release is particularly important because our attempts
		 * above to get the semaphore may have succeeded, and if there
		 * was a timeout, we should unconditionally clear the semaphore
		 * bits to free the driver to make progress
		 */
		ixgbe_release_eeprom_semaphore(hw);

		usec_delay(50);
		/*
		 * one last try
		 * If the SMBI bit is 0 when we read it, then the bit will be
		 * set and we have the semaphore
		 */
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
		if (!(swsm & IXGBE_SWSM_SMBI))
			status = IXGBE_SUCCESS;
	}

	/* Now get the semaphore between SW/FW through the SWESMBI bit */
	if (status == IXGBE_SUCCESS) {
		for (i = 0; i < timeout; i++) {
			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));

			/* Set the SW EEPROM semaphore bit to request access */
			swsm |= IXGBE_SWSM_SWESMBI;
			IXGBE_WRITE_REG(hw, IXGBE_SWSM_BY_MAC(hw), swsm);

			/*
			 * If we set the bit successfully then we got the
			 * semaphore.
			 */
			/* Read back: firmware can refuse by keeping it clear. */
			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
			if (swsm & IXGBE_SWSM_SWESMBI)
				break;

			usec_delay(50);
		}

		/*
		 * Release semaphores and return error if SW EEPROM semaphore
		 * was not granted because we don't have access to the EEPROM
		 */
		if (i >= timeout) {
			ERROR_REPORT1(IXGBE_ERROR_POLLING,
			    "SWESMBI Software EEPROM semaphore not granted.\n");
			ixgbe_release_eeprom_semaphore(hw);
			status = IXGBE_ERR_EEPROM;
		}
	} else {
		ERROR_REPORT1(IXGBE_ERROR_POLLING,
			     "Software semaphore SMBI between device drivers "
			     "not granted.\n");
	}

	return status;
}
1956 
1957 /**
1958  * ixgbe_release_eeprom_semaphore - Release hardware semaphore
1959  * @hw: pointer to hardware structure
1960  *
1961  * This function clears hardware semaphore bits.
1962  **/
static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
{
	u32 swsm;

	DEBUGFUNC("ixgbe_release_eeprom_semaphore");

	/*
	 * NOTE(review): this path uses IXGBE_SWSM directly while the acquire
	 * path (ixgbe_get_eeprom_semaphore) uses IXGBE_SWSM_BY_MAC(hw) --
	 * confirm both resolve to the same register on all supported MACs.
	 */
	swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);

	/* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
	swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
	IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
	IXGBE_WRITE_FLUSH(hw);
}
1976 
1977 /**
1978  * ixgbe_ready_eeprom - Polls for EEPROM ready
1979  * @hw: pointer to hardware structure
1980  **/
1981 static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
1982 {
1983 	s32 status = IXGBE_SUCCESS;
1984 	u16 i;
1985 	u8 spi_stat_reg;
1986 
1987 	DEBUGFUNC("ixgbe_ready_eeprom");
1988 
1989 	/*
1990 	 * Read "Status Register" repeatedly until the LSB is cleared.  The
1991 	 * EEPROM will signal that the command has been completed by clearing
1992 	 * bit 0 of the internal status register.  If it's not cleared within
1993 	 * 5 milliseconds, then error out.
1994 	 */
1995 	for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
1996 		ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
1997 					    IXGBE_EEPROM_OPCODE_BITS);
1998 		spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
1999 		if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
2000 			break;
2001 
2002 		usec_delay(5);
2003 		ixgbe_standby_eeprom(hw);
2004 	}
2005 
2006 	/*
2007 	 * On some parts, SPI write time could vary from 0-20mSec on 3.3V
2008 	 * devices (and only 0-5mSec on 5V devices)
2009 	 */
2010 	if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
2011 		DEBUGOUT("SPI EEPROM Status error\n");
2012 		status = IXGBE_ERR_EEPROM;
2013 	}
2014 
2015 	return status;
2016 }
2017 
2018 /**
2019  * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
2020  * @hw: pointer to hardware structure
2021  **/
2022 static void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
2023 {
2024 	u32 eec;
2025 
2026 	DEBUGFUNC("ixgbe_standby_eeprom");
2027 
2028 	eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
2029 
2030 	/* Toggle CS to flush commands */
2031 	eec |= IXGBE_EEC_CS;
2032 	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
2033 	IXGBE_WRITE_FLUSH(hw);
2034 	usec_delay(1);
2035 	eec &= ~IXGBE_EEC_CS;
2036 	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
2037 	IXGBE_WRITE_FLUSH(hw);
2038 	usec_delay(1);
2039 }
2040 
2041 /**
2042  * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
2043  * @hw: pointer to hardware structure
2044  * @data: data to send to the EEPROM
2045  * @count: number of bits to shift out
2046  **/
2047 static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
2048 					u16 count)
2049 {
2050 	u32 eec;
2051 	u32 mask;
2052 	u32 i;
2053 
2054 	DEBUGFUNC("ixgbe_shift_out_eeprom_bits");
2055 
2056 	eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
2057 
2058 	/*
2059 	 * Mask is used to shift "count" bits of "data" out to the EEPROM
2060 	 * one bit at a time.  Determine the starting bit based on count
2061 	 */
2062 	mask = 0x01 << (count - 1);
2063 
2064 	for (i = 0; i < count; i++) {
2065 		/*
2066 		 * A "1" is shifted out to the EEPROM by setting bit "DI" to a
2067 		 * "1", and then raising and then lowering the clock (the SK
2068 		 * bit controls the clock input to the EEPROM).  A "0" is
2069 		 * shifted out to the EEPROM by setting "DI" to "0" and then
2070 		 * raising and then lowering the clock.
2071 		 */
2072 		if (data & mask)
2073 			eec |= IXGBE_EEC_DI;
2074 		else
2075 			eec &= ~IXGBE_EEC_DI;
2076 
2077 		IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
2078 		IXGBE_WRITE_FLUSH(hw);
2079 
2080 		usec_delay(1);
2081 
2082 		ixgbe_raise_eeprom_clk(hw, &eec);
2083 		ixgbe_lower_eeprom_clk(hw, &eec);
2084 
2085 		/*
2086 		 * Shift mask to signify next bit of data to shift in to the
2087 		 * EEPROM
2088 		 */
2089 		mask = mask >> 1;
2090 	}
2091 
2092 	/* We leave the "DI" bit set to "0" when we leave this routine. */
2093 	eec &= ~IXGBE_EEC_DI;
2094 	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
2095 	IXGBE_WRITE_FLUSH(hw);
2096 }
2097 
2098 /**
2099  * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
2100  * @hw: pointer to hardware structure
2101  * @count: number of bits to shift
2102  **/
2103 static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
2104 {
2105 	u32 eec;
2106 	u32 i;
2107 	u16 data = 0;
2108 
2109 	DEBUGFUNC("ixgbe_shift_in_eeprom_bits");
2110 
2111 	/*
2112 	 * In order to read a register from the EEPROM, we need to shift
2113 	 * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
2114 	 * the clock input to the EEPROM (setting the SK bit), and then reading
2115 	 * the value of the "DO" bit.  During this "shifting in" process the
2116 	 * "DI" bit should always be clear.
2117 	 */
2118 	eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
2119 
2120 	eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);
2121 
2122 	for (i = 0; i < count; i++) {
2123 		data = data << 1;
2124 		ixgbe_raise_eeprom_clk(hw, &eec);
2125 
2126 		eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
2127 
2128 		eec &= ~(IXGBE_EEC_DI);
2129 		if (eec & IXGBE_EEC_DO)
2130 			data |= 1;
2131 
2132 		ixgbe_lower_eeprom_clk(hw, &eec);
2133 	}
2134 
2135 	return data;
2136 }
2137 
2138 /**
2139  * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
2140  * @hw: pointer to hardware structure
2141  * @eec: EEC register's current value
2142  **/
2143 static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
2144 {
2145 	DEBUGFUNC("ixgbe_raise_eeprom_clk");
2146 
2147 	/*
2148 	 * Raise the clock input to the EEPROM
2149 	 * (setting the SK bit), then delay
2150 	 */
2151 	*eec = *eec | IXGBE_EEC_SK;
2152 	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), *eec);
2153 	IXGBE_WRITE_FLUSH(hw);
2154 	usec_delay(1);
2155 }
2156 
2157 /**
2158  * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
2159  * @hw: pointer to hardware structure
2160  * @eec: EEC's current value
2161  **/
2162 static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
2163 {
2164 	DEBUGFUNC("ixgbe_lower_eeprom_clk");
2165 
2166 	/*
2167 	 * Lower the clock input to the EEPROM (clearing the SK bit), then
2168 	 * delay
2169 	 */
2170 	*eec = *eec & ~IXGBE_EEC_SK;
2171 	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), *eec);
2172 	IXGBE_WRITE_FLUSH(hw);
2173 	usec_delay(1);
2174 }
2175 
2176 /**
2177  * ixgbe_release_eeprom - Release EEPROM, release semaphores
2178  * @hw: pointer to hardware structure
2179  **/
2180 static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
2181 {
2182 	u32 eec;
2183 
2184 	DEBUGFUNC("ixgbe_release_eeprom");
2185 
2186 	eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
2187 
2188 	eec |= IXGBE_EEC_CS;  /* Pull CS high */
2189 	eec &= ~IXGBE_EEC_SK; /* Lower SCK */
2190 
2191 	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
2192 	IXGBE_WRITE_FLUSH(hw);
2193 
2194 	usec_delay(1);
2195 
2196 	/* Stop requesting EEPROM access */
2197 	eec &= ~IXGBE_EEC_REQ;
2198 	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
2199 
2200 	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
2201 
2202 	/* Delay before attempt to obtain semaphore again to allow FW access */
2203 	msec_delay(hw->eeprom.semaphore_delay);
2204 }
2205 
2206 /**
2207  * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
2208  * @hw: pointer to hardware structure
2209  *
2210  * Returns a negative error code on error, or the 16-bit checksum
2211  **/
2212 s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
2213 {
2214 	u16 i;
2215 	u16 j;
2216 	u16 checksum = 0;
2217 	u16 length = 0;
2218 	u16 pointer = 0;
2219 	u16 word = 0;
2220 
2221 	DEBUGFUNC("ixgbe_calc_eeprom_checksum_generic");
2222 
2223 	/* Include 0x0-0x3F in the checksum */
2224 	for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
2225 		if (hw->eeprom.ops.read(hw, i, &word)) {
2226 			DEBUGOUT("EEPROM read failed\n");
2227 			return IXGBE_ERR_EEPROM;
2228 		}
2229 		checksum += word;
2230 	}
2231 
2232 	/* Include all data from pointers except for the fw pointer */
2233 	for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
2234 		if (hw->eeprom.ops.read(hw, i, &pointer)) {
2235 			DEBUGOUT("EEPROM read failed\n");
2236 			return IXGBE_ERR_EEPROM;
2237 		}
2238 
2239 		/* If the pointer seems invalid */
2240 		if (pointer == 0xFFFF || pointer == 0)
2241 			continue;
2242 
2243 		if (hw->eeprom.ops.read(hw, pointer, &length)) {
2244 			DEBUGOUT("EEPROM read failed\n");
2245 			return IXGBE_ERR_EEPROM;
2246 		}
2247 
2248 		if (length == 0xFFFF || length == 0)
2249 			continue;
2250 
2251 		for (j = pointer + 1; j <= pointer + length; j++) {
2252 			if (hw->eeprom.ops.read(hw, j, &word)) {
2253 				DEBUGOUT("EEPROM read failed\n");
2254 				return IXGBE_ERR_EEPROM;
2255 			}
2256 			checksum += word;
2257 		}
2258 	}
2259 
2260 	checksum = (u16)IXGBE_EEPROM_SUM - checksum;
2261 
2262 	return (s32)checksum;
2263 }
2264 
2265 /**
2266  * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
2267  * @hw: pointer to hardware structure
2268  * @checksum_val: calculated checksum
2269  *
2270  * Performs checksum calculation and validates the EEPROM checksum.  If the
2271  * caller does not need checksum_val, the value can be NULL.
2272  **/
2273 s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
2274 					   u16 *checksum_val)
2275 {
2276 	s32 status;
2277 	u16 checksum;
2278 	u16 read_checksum = 0;
2279 
2280 	DEBUGFUNC("ixgbe_validate_eeprom_checksum_generic");
2281 
2282 	/* Read the first word from the EEPROM. If this times out or fails, do
2283 	 * not continue or we could be in for a very long wait while every
2284 	 * EEPROM read fails
2285 	 */
2286 	status = hw->eeprom.ops.read(hw, 0, &checksum);
2287 	if (status) {
2288 		DEBUGOUT("EEPROM read failed\n");
2289 		return status;
2290 	}
2291 
2292 	status = hw->eeprom.ops.calc_checksum(hw);
2293 	if (status < 0)
2294 		return status;
2295 
2296 	checksum = (u16)(status & 0xffff);
2297 
2298 	status = hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
2299 	if (status) {
2300 		DEBUGOUT("EEPROM read failed\n");
2301 		return status;
2302 	}
2303 
2304 	/* Verify read checksum from EEPROM is the same as
2305 	 * calculated checksum
2306 	 */
2307 	if (read_checksum != checksum)
2308 		status = IXGBE_ERR_EEPROM_CHECKSUM;
2309 
2310 	/* If the user cares, return the calculated checksum */
2311 	if (checksum_val)
2312 		*checksum_val = checksum;
2313 
2314 	return status;
2315 }
2316 
2317 /**
2318  * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
2319  * @hw: pointer to hardware structure
2320  **/
2321 s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
2322 {
2323 	s32 status;
2324 	u16 checksum;
2325 
2326 	DEBUGFUNC("ixgbe_update_eeprom_checksum_generic");
2327 
2328 	/* Read the first word from the EEPROM. If this times out or fails, do
2329 	 * not continue or we could be in for a very long wait while every
2330 	 * EEPROM read fails
2331 	 */
2332 	status = hw->eeprom.ops.read(hw, 0, &checksum);
2333 	if (status) {
2334 		DEBUGOUT("EEPROM read failed\n");
2335 		return status;
2336 	}
2337 
2338 	status = hw->eeprom.ops.calc_checksum(hw);
2339 	if (status < 0)
2340 		return status;
2341 
2342 	checksum = (u16)(status & 0xffff);
2343 
2344 	status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, checksum);
2345 
2346 	return status;
2347 }
2348 
2349 /**
2350  * ixgbe_validate_mac_addr - Validate MAC address
2351  * @mac_addr: pointer to MAC address.
2352  *
2353  * Tests a MAC address to ensure it is a valid Individual Address.
2354  **/
2355 s32 ixgbe_validate_mac_addr(u8 *mac_addr)
2356 {
2357 	s32 status = IXGBE_SUCCESS;
2358 
2359 	DEBUGFUNC("ixgbe_validate_mac_addr");
2360 
2361 	/* Make sure it is not a multicast address */
2362 	if (IXGBE_IS_MULTICAST(mac_addr)) {
2363 		status = IXGBE_ERR_INVALID_MAC_ADDR;
2364 	/* Not a broadcast address */
2365 	} else if (IXGBE_IS_BROADCAST(mac_addr)) {
2366 		status = IXGBE_ERR_INVALID_MAC_ADDR;
2367 	/* Reject the zero address */
2368 	} else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
2369 		   mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
2370 		status = IXGBE_ERR_INVALID_MAC_ADDR;
2371 	}
2372 	return status;
2373 }
2374 
2375 /**
2376  * ixgbe_set_rar_generic - Set Rx address register
2377  * @hw: pointer to hardware structure
2378  * @index: Receive address register to write
2379  * @addr: Address to put into receive address register
2380  * @vmdq: VMDq "set" or "pool" index
2381  * @enable_addr: set flag that address is active
2382  *
2383  * Puts an ethernet address into a receive address register.
2384  **/
2385 s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
2386 			  u32 enable_addr)
2387 {
2388 	u32 rar_low, rar_high;
2389 	u32 rar_entries = hw->mac.num_rar_entries;
2390 
2391 	DEBUGFUNC("ixgbe_set_rar_generic");
2392 
2393 	/* Make sure we are using a valid rar index range */
2394 	if (index >= rar_entries) {
2395 		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
2396 			     "RAR index %d is out of range.\n", index);
2397 		return IXGBE_ERR_INVALID_ARGUMENT;
2398 	}
2399 
2400 	/* setup VMDq pool selection before this RAR gets enabled */
2401 	hw->mac.ops.set_vmdq(hw, index, vmdq);
2402 
2403 	/*
2404 	 * HW expects these in little endian so we reverse the byte
2405 	 * order from network order (big endian) to little endian
2406 	 */
2407 	rar_low = ((u32)addr[0] |
2408 		   ((u32)addr[1] << 8) |
2409 		   ((u32)addr[2] << 16) |
2410 		   ((u32)addr[3] << 24));
2411 	/*
2412 	 * Some parts put the VMDq setting in the extra RAH bits,
2413 	 * so save everything except the lower 16 bits that hold part
2414 	 * of the address and the address valid bit.
2415 	 */
2416 	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
2417 	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
2418 	rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
2419 
2420 	if (enable_addr != 0)
2421 		rar_high |= IXGBE_RAH_AV;
2422 
2423 	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
2424 	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
2425 
2426 	return IXGBE_SUCCESS;
2427 }
2428 
2429 /**
2430  * ixgbe_clear_rar_generic - Remove Rx address register
2431  * @hw: pointer to hardware structure
2432  * @index: Receive address register to write
2433  *
2434  * Clears an ethernet address from a receive address register.
2435  **/
2436 s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
2437 {
2438 	u32 rar_high;
2439 	u32 rar_entries = hw->mac.num_rar_entries;
2440 
2441 	DEBUGFUNC("ixgbe_clear_rar_generic");
2442 
2443 	/* Make sure we are using a valid rar index range */
2444 	if (index >= rar_entries) {
2445 		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
2446 			     "RAR index %d is out of range.\n", index);
2447 		return IXGBE_ERR_INVALID_ARGUMENT;
2448 	}
2449 
2450 	/*
2451 	 * Some parts put the VMDq setting in the extra RAH bits,
2452 	 * so save everything except the lower 16 bits that hold part
2453 	 * of the address and the address valid bit.
2454 	 */
2455 	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
2456 	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
2457 
2458 	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
2459 	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
2460 
2461 	/* clear VMDq pool/queue selection for this RAR */
2462 	hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
2463 
2464 	return IXGBE_SUCCESS;
2465 }
2466 
2467 /**
2468  * ixgbe_init_rx_addrs_generic - Initializes receive address filters.
2469  * @hw: pointer to hardware structure
2470  *
2471  * Places the MAC address in receive address register 0 and clears the rest
2472  * of the receive address registers. Clears the multicast table. Assumes
2473  * the receiver is in reset when the routine is called.
2474  **/
s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
{
	u32 i;
	u32 rar_entries = hw->mac.num_rar_entries;

	DEBUGFUNC("ixgbe_init_rx_addrs_generic");

	/*
	 * If the current mac address is valid, assume it is a software override
	 * to the permanent address.
	 * Otherwise, use the permanent address from the eeprom.
	 */
	if (ixgbe_validate_mac_addr(hw->mac.addr) ==
	    IXGBE_ERR_INVALID_MAC_ADDR) {
		/* Get the MAC address from the RAR0 for later reference */
		hw->mac.ops.get_mac_addr(hw, hw->mac.addr);

		DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
			  hw->mac.addr[0], hw->mac.addr[1],
			  hw->mac.addr[2]);
		DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
			  hw->mac.addr[4], hw->mac.addr[5]);
	} else {
		/* Setup the receive address. */
		DEBUGOUT("Overriding MAC Address in RAR[0]\n");
		DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ",
			  hw->mac.addr[0], hw->mac.addr[1],
			  hw->mac.addr[2]);
		DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
			  hw->mac.addr[4], hw->mac.addr[5]);

		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
	}

	/* clear VMDq pool/queue selection for RAR 0 */
	hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);

	/* Reset the overflow-promiscuous accounting */
	hw->addr_ctrl.overflow_promisc = 0;

	/* RAR[0] now holds the station address, so one entry is in use */
	hw->addr_ctrl.rar_used_count = 1;

	/* Zero out the other receive addresses. */
	DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1);
	for (i = 1; i < rar_entries; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
	}

	/* Clear the MTA; MCSTCTRL is rewritten with MFE left disabled */
	hw->addr_ctrl.mta_in_use = 0;
	IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);

	DEBUGOUT(" Clearing MTA\n");
	for (i = 0; i < hw->mac.mcft_size; i++)
		IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);

	/* Clear the unicast hash table registers as well */
	ixgbe_init_uta_tables(hw);

	return IXGBE_SUCCESS;
}
2535 
2536 /**
2537  * ixgbe_add_uc_addr - Adds a secondary unicast address.
2538  * @hw: pointer to hardware structure
2539  * @addr: new address
2540  * @vmdq: VMDq "set" or "pool" index
2541  *
2542  * Adds it to unused receive address register or goes into promiscuous mode.
2543  **/
2544 void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
2545 {
2546 	u32 rar_entries = hw->mac.num_rar_entries;
2547 	u32 rar;
2548 
2549 	DEBUGFUNC("ixgbe_add_uc_addr");
2550 
2551 	DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
2552 		  addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
2553 
2554 	/*
2555 	 * Place this address in the RAR if there is room,
2556 	 * else put the controller into promiscuous mode
2557 	 */
2558 	if (hw->addr_ctrl.rar_used_count < rar_entries) {
2559 		rar = hw->addr_ctrl.rar_used_count;
2560 		hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
2561 		DEBUGOUT1("Added a secondary address to RAR[%d]\n", rar);
2562 		hw->addr_ctrl.rar_used_count++;
2563 	} else {
2564 		hw->addr_ctrl.overflow_promisc++;
2565 	}
2566 
2567 	DEBUGOUT("ixgbe_add_uc_addr Complete\n");
2568 }
2569 
2570 /**
2571  * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
2572  * @hw: pointer to hardware structure
2573  * @addr_list: the list of new addresses
2574  * @addr_count: number of addresses
2575  * @next: iterator function to walk the address list
2576  *
2577  * The given list replaces any existing list.  Clears the secondary addrs from
2578  * receive address registers.  Uses unused receive address registers for the
2579  * first secondary addresses, and falls back to promiscuous mode as needed.
2580  *
2581  * Drivers using secondary unicast addresses must set user_set_promisc when
2582  * manually putting the device into promiscuous mode.
2583  **/
s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
				      u32 addr_count, ixgbe_mc_addr_itr next)
{
	u8 *addr;
	u32 i;
	u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
	u32 uc_addr_in_use;
	u32 fctrl;
	u32 vmdq;

	DEBUGFUNC("ixgbe_update_uc_addr_list_generic");

	/*
	 * Clear accounting of old secondary address list,
	 * don't count RAR[0]
	 * NOTE(review): assumes rar_used_count >= 1 (RAR[0] always holds
	 * the station address) -- verify against callers.
	 */
	uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
	hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
	hw->addr_ctrl.overflow_promisc = 0;

	/* Zero out the other receive addresses */
	DEBUGOUT1("Clearing RAR[1-%d]\n", uc_addr_in_use+1);
	for (i = 0; i < uc_addr_in_use; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0);
	}

	/* Add the new addresses via the caller-supplied iterator */
	for (i = 0; i < addr_count; i++) {
		DEBUGOUT(" Adding the secondary addresses:\n");
		addr = next(hw, &addr_list, &vmdq);
		ixgbe_add_uc_addr(hw, addr, vmdq);
	}

	/*
	 * Toggle address-overflow promiscuous mode, but never override a
	 * promiscuous setting the user established manually.
	 */
	if (hw->addr_ctrl.overflow_promisc) {
		/* enable promisc if not already in overflow or set by user */
		if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
			DEBUGOUT(" Entering address overflow promisc mode\n");
			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
			fctrl |= IXGBE_FCTRL_UPE;
			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
		}
	} else {
		/* only disable if set by overflow, not by user */
		if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
			DEBUGOUT(" Leaving address overflow promisc mode\n");
			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
			fctrl &= ~IXGBE_FCTRL_UPE;
			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
		}
	}

	DEBUGOUT("ixgbe_update_uc_addr_list_generic Complete\n");
	return IXGBE_SUCCESS;
}
2639 
2640 /**
2641  * ixgbe_mta_vector - Determines bit-vector in multicast table to set
2642  * @hw: pointer to hardware structure
2643  * @mc_addr: the multicast address
2644  *
2645  * Extracts the 12 bits, from a multicast address, to determine which
2646  * bit-vector to set in the multicast table. The hardware uses 12 bits, from
2647  * incoming rx multicast addresses, to determine the bit-vector to check in
2648  * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
2649  * by the MO field of the MCSTCTRL. The MO field is set during initialization
2650  * to mc_filter_type.
2651  **/
2652 static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
2653 {
2654 	u32 vector = 0;
2655 
2656 	DEBUGFUNC("ixgbe_mta_vector");
2657 
2658 	switch (hw->mac.mc_filter_type) {
2659 	case 0:   /* use bits [47:36] of the address */
2660 		vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
2661 		break;
2662 	case 1:   /* use bits [46:35] of the address */
2663 		vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
2664 		break;
2665 	case 2:   /* use bits [45:34] of the address */
2666 		vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
2667 		break;
2668 	case 3:   /* use bits [43:32] of the address */
2669 		vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
2670 		break;
2671 	default:  /* Invalid mc_filter_type */
2672 		DEBUGOUT("MC filter type param set incorrectly\n");
2673 		ASSERT(0);
2674 		break;
2675 	}
2676 
2677 	/* vector can only be 12-bits or boundary will be exceeded */
2678 	vector &= 0xFFF;
2679 	return vector;
2680 }
2681 
2682 /**
2683  * ixgbe_set_mta - Set bit-vector in multicast table
2684  * @hw: pointer to hardware structure
2685  * @mc_addr: Multicast address
2686  *
2687  * Sets the bit-vector in the multicast table.
2688  **/
2689 void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
2690 {
2691 	u32 vector;
2692 	u32 vector_bit;
2693 	u32 vector_reg;
2694 
2695 	DEBUGFUNC("ixgbe_set_mta");
2696 
2697 	hw->addr_ctrl.mta_in_use++;
2698 
2699 	vector = ixgbe_mta_vector(hw, mc_addr);
2700 	DEBUGOUT1(" bit-vector = 0x%03X\n", vector);
2701 
2702 	/*
2703 	 * The MTA is a register array of 128 32-bit registers. It is treated
2704 	 * like an array of 4096 bits.  We want to set bit
2705 	 * BitArray[vector_value]. So we figure out what register the bit is
2706 	 * in, read it, OR in the new bit, then write back the new value.  The
2707 	 * register is determined by the upper 7 bits of the vector value and
2708 	 * the bit within that register are determined by the lower 5 bits of
2709 	 * the value.
2710 	 */
2711 	vector_reg = (vector >> 5) & 0x7F;
2712 	vector_bit = vector & 0x1F;
2713 	hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
2714 }
2715 
2716 /**
2717  * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
2718  * @hw: pointer to hardware structure
2719  * @mc_addr_list: the list of new multicast addresses
2720  * @mc_addr_count: number of addresses
2721  * @next: iterator function to walk the multicast address list
2722  * @clear: flag, when set clears the table beforehand
2723  *
2724  * When the clear flag is set, the given list replaces any existing list.
2725  * Hashes the given addresses into the multicast table.
2726  **/
2727 s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
2728 				      u32 mc_addr_count, ixgbe_mc_addr_itr next,
2729 				      bool clear)
2730 {
2731 	u32 i;
2732 	u32 vmdq;
2733 
2734 	DEBUGFUNC("ixgbe_update_mc_addr_list_generic");
2735 
2736 	/*
2737 	 * Set the new number of MC addresses that we are being requested to
2738 	 * use.
2739 	 */
2740 	hw->addr_ctrl.num_mc_addrs = mc_addr_count;
2741 	hw->addr_ctrl.mta_in_use = 0;
2742 
2743 	/* Clear mta_shadow */
2744 	if (clear) {
2745 		DEBUGOUT(" Clearing MTA\n");
2746 		memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
2747 	}
2748 
2749 	/* Update mta_shadow */
2750 	for (i = 0; i < mc_addr_count; i++) {
2751 		DEBUGOUT(" Adding the multicast addresses:\n");
2752 		ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
2753 	}
2754 
2755 	/* Enable mta */
2756 	for (i = 0; i < hw->mac.mcft_size; i++)
2757 		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
2758 				      hw->mac.mta_shadow[i]);
2759 
2760 	if (hw->addr_ctrl.mta_in_use > 0)
2761 		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
2762 				IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
2763 
2764 	DEBUGOUT("ixgbe_update_mc_addr_list_generic Complete\n");
2765 	return IXGBE_SUCCESS;
2766 }
2767 
2768 /**
2769  * ixgbe_enable_mc_generic - Enable multicast address in RAR
2770  * @hw: pointer to hardware structure
2771  *
2772  * Enables multicast address in RAR and the use of the multicast hash table.
2773  **/
2774 s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
2775 {
2776 	struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2777 
2778 	DEBUGFUNC("ixgbe_enable_mc_generic");
2779 
2780 	if (a->mta_in_use > 0)
2781 		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
2782 				hw->mac.mc_filter_type);
2783 
2784 	return IXGBE_SUCCESS;
2785 }
2786 
2787 /**
2788  * ixgbe_disable_mc_generic - Disable multicast address in RAR
2789  * @hw: pointer to hardware structure
2790  *
2791  * Disables multicast address in RAR and the use of the multicast hash table.
2792  **/
2793 s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
2794 {
2795 	struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2796 
2797 	DEBUGFUNC("ixgbe_disable_mc_generic");
2798 
2799 	if (a->mta_in_use > 0)
2800 		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
2801 
2802 	return IXGBE_SUCCESS;
2803 }
2804 
2805 /**
2806  * ixgbe_fc_enable_generic - Enable flow control
2807  * @hw: pointer to hardware structure
2808  *
2809  * Enable flow control according to the current settings.
2810  **/
s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_SUCCESS;
	u32 mflcn_reg, fccfg_reg;
	u32 reg;
	u32 fcrtl, fcrth;
	int i;

	DEBUGFUNC("ixgbe_fc_enable_generic");

	/* Validate the water mark configuration */
	if (!hw->fc.pause_time) {
		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
		goto out;
	}

	/* Low water mark of zero causes XOFF floods */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		/*
		 * NOTE: the bitwise test relies on ixgbe_fc_tx_pause and
		 * ixgbe_fc_full sharing the Tx-pause bit in the fc mode
		 * enum encoding, so both modes are covered here.
		 */
		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
		    hw->fc.high_water[i]) {
			if (!hw->fc.low_water[i] ||
			    hw->fc.low_water[i] >= hw->fc.high_water[i]) {
				DEBUGOUT("Invalid water mark configuration\n");
				ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
				goto out;
			}
		}
	}

	/* Negotiate the fc mode to use */
	hw->mac.ops.fc_autoneg(hw);

	/* Disable any previous flow control settings */
	mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
	mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);

	fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
	fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);

	/*
	 * The possible values of fc.current_mode are:
	 * 0: Flow control is completely disabled
	 * 1: Rx flow control is enabled (we can receive pause frames,
	 *    but not send pause frames).
	 * 2: Tx flow control is enabled (we can send pause frames but
	 *    we do not support receiving pause frames).
	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
	 * other: Invalid.
	 */
	switch (hw->fc.current_mode) {
	case ixgbe_fc_none:
		/*
		 * Flow control is disabled by software override or autoneg.
		 * The code below will actually disable it in the HW.
		 */
		break;
	case ixgbe_fc_rx_pause:
		/*
		 * Rx Flow control is enabled and Tx Flow control is
		 * disabled by software override. Since there really
		 * isn't a way to advertise that we are capable of RX
		 * Pause ONLY, we will advertise that we support both
		 * symmetric and asymmetric Rx PAUSE.  Later, we will
		 * disable the adapter's ability to send PAUSE frames.
		 */
		mflcn_reg |= IXGBE_MFLCN_RFCE;
		break;
	case ixgbe_fc_tx_pause:
		/*
		 * Tx Flow control is enabled, and Rx Flow control is
		 * disabled by software override.
		 */
		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
		break;
	case ixgbe_fc_full:
		/* Flow control (both Rx and Tx) is enabled by SW override. */
		mflcn_reg |= IXGBE_MFLCN_RFCE;
		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
		break;
	default:
		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
			     "Flow control param set incorrectly\n");
		ret_val = IXGBE_ERR_CONFIG;
		goto out;
		break;
	}

	/* Set 802.3x based flow control settings. */
	mflcn_reg |= IXGBE_MFLCN_DPF;
	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);


	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
		    hw->fc.high_water[i]) {
			/* Water marks are stored in KB units; shift to bytes */
			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
			fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
		} else {
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
			/*
			 * In order to prevent Tx hangs when the internal Tx
			 * switch is enabled we must set the high water mark
			 * to the Rx packet buffer size - 24KB.  This allows
			 * the Tx switch to function even under heavy Rx
			 * workloads.
			 */
			fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576;
		}

		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
	}

	/* Configure pause time (2 TCs per register) */
	reg = hw->fc.pause_time * 0x00010001;
	for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);

	/* Configure flow control refresh threshold value */
	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);

out:
	return ret_val;
}
2937 
2938 /**
2939  * ixgbe_negotiate_fc - Negotiate flow control
2940  * @hw: pointer to hardware structure
2941  * @adv_reg: flow control advertised settings
2942  * @lp_reg: link partner's flow control settings
2943  * @adv_sym: symmetric pause bit in advertisement
2944  * @adv_asm: asymmetric pause bit in advertisement
2945  * @lp_sym: symmetric pause bit in link partner advertisement
2946  * @lp_asm: asymmetric pause bit in link partner advertisement
2947  *
2948  * Find the intersection between advertised settings and link partner's
2949  * advertised settings
2950  **/
2951 s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
2952 		       u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
2953 {
2954 	if ((!(adv_reg)) ||  (!(lp_reg))) {
2955 		ERROR_REPORT3(IXGBE_ERROR_UNSUPPORTED,
2956 			     "Local or link partner's advertised flow control "
2957 			     "settings are NULL. Local: %x, link partner: %x\n",
2958 			     adv_reg, lp_reg);
2959 		return IXGBE_ERR_FC_NOT_NEGOTIATED;
2960 	}
2961 
2962 	if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
2963 		/*
2964 		 * Now we need to check if the user selected Rx ONLY
2965 		 * of pause frames.  In this case, we had to advertise
2966 		 * FULL flow control because we could not advertise RX
2967 		 * ONLY. Hence, we must now check to see if we need to
2968 		 * turn OFF the TRANSMISSION of PAUSE frames.
2969 		 */
2970 		if (hw->fc.requested_mode == ixgbe_fc_full) {
2971 			hw->fc.current_mode = ixgbe_fc_full;
2972 			DEBUGOUT("Flow Control = FULL.\n");
2973 		} else {
2974 			hw->fc.current_mode = ixgbe_fc_rx_pause;
2975 			DEBUGOUT("Flow Control=RX PAUSE frames only\n");
2976 		}
2977 	} else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2978 		   (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2979 		hw->fc.current_mode = ixgbe_fc_tx_pause;
2980 		DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
2981 	} else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2982 		   !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2983 		hw->fc.current_mode = ixgbe_fc_rx_pause;
2984 		DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
2985 	} else {
2986 		hw->fc.current_mode = ixgbe_fc_none;
2987 		DEBUGOUT("Flow Control = NONE.\n");
2988 	}
2989 	return IXGBE_SUCCESS;
2990 }
2991 
2992 /**
2993  * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
2994  * @hw: pointer to hardware structure
2995  *
2996  * Enable flow control according on 1 gig fiber.
2997  **/
2998 static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
2999 {
3000 	u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
3001 	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
3002 
3003 	/*
3004 	 * On multispeed fiber at 1g, bail out if
3005 	 * - link is up but AN did not complete, or if
3006 	 * - link is up and AN completed but timed out
3007 	 */
3008 
3009 	linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
3010 	if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
3011 	    (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
3012 		DEBUGOUT("Auto-Negotiation did not complete or timed out\n");
3013 		goto out;
3014 	}
3015 
3016 	pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
3017 	pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
3018 
3019 	ret_val =  ixgbe_negotiate_fc(hw, pcs_anadv_reg,
3020 				      pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
3021 				      IXGBE_PCS1GANA_ASM_PAUSE,
3022 				      IXGBE_PCS1GANA_SYM_PAUSE,
3023 				      IXGBE_PCS1GANA_ASM_PAUSE);
3024 
3025 out:
3026 	return ret_val;
3027 }
3028 
3029 /**
3030  * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
3031  * @hw: pointer to hardware structure
3032  *
3033  * Enable flow control according to IEEE clause 37.
3034  **/
3035 static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
3036 {
3037 	u32 links2, anlp1_reg, autoc_reg, links;
3038 	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
3039 
3040 	/*
3041 	 * On backplane, bail out if
3042 	 * - backplane autoneg was not completed, or if
3043 	 * - we are 82599 and link partner is not AN enabled
3044 	 */
3045 	links = IXGBE_READ_REG(hw, IXGBE_LINKS);
3046 	if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
3047 		DEBUGOUT("Auto-Negotiation did not complete\n");
3048 		goto out;
3049 	}
3050 
3051 	if (hw->mac.type == ixgbe_mac_82599EB) {
3052 		links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
3053 		if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
3054 			DEBUGOUT("Link partner is not AN enabled\n");
3055 			goto out;
3056 		}
3057 	}
3058 	/*
3059 	 * Read the 10g AN autoc and LP ability registers and resolve
3060 	 * local flow control settings accordingly
3061 	 */
3062 	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
3063 	anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
3064 
3065 	ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
3066 		anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
3067 		IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);
3068 
3069 out:
3070 	return ret_val;
3071 }
3072 
3073 /**
3074  * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
3075  * @hw: pointer to hardware structure
3076  *
3077  * Enable flow control according to IEEE clause 37.
3078  **/
3079 static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
3080 {
3081 	u16 technology_ability_reg = 0;
3082 	u16 lp_technology_ability_reg = 0;
3083 
3084 	hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
3085 			     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3086 			     &technology_ability_reg);
3087 	hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP,
3088 			     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3089 			     &lp_technology_ability_reg);
3090 
3091 	return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
3092 				  (u32)lp_technology_ability_reg,
3093 				  IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
3094 				  IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
3095 }
3096 
3097 /**
3098  * ixgbe_fc_autoneg - Configure flow control
3099  * @hw: pointer to hardware structure
3100  *
3101  * Compares our advertised flow control capabilities to those advertised by
3102  * our link partner, and determines the proper flow control mode to use.
3103  **/
3104 void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
3105 {
3106 	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
3107 	ixgbe_link_speed speed;
3108 	bool link_up;
3109 
3110 	DEBUGFUNC("ixgbe_fc_autoneg");
3111 
3112 	/*
3113 	 * AN should have completed when the cable was plugged in.
3114 	 * Look for reasons to bail out.  Bail out if:
3115 	 * - FC autoneg is disabled, or if
3116 	 * - link is not up.
3117 	 */
3118 	if (hw->fc.disable_fc_autoneg) {
3119 		/* TODO: This should be just an informative log */
3120 		ERROR_REPORT1(IXGBE_ERROR_CAUTION,
3121 			      "Flow control autoneg is disabled");
3122 		goto out;
3123 	}
3124 
3125 	hw->mac.ops.check_link(hw, &speed, &link_up, false);
3126 	if (!link_up) {
3127 		ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
3128 		goto out;
3129 	}
3130 
3131 	switch (hw->phy.media_type) {
3132 	/* Autoneg flow control on fiber adapters */
3133 	case ixgbe_media_type_fiber_fixed:
3134 	case ixgbe_media_type_fiber_qsfp:
3135 	case ixgbe_media_type_fiber:
3136 		if (speed == IXGBE_LINK_SPEED_1GB_FULL)
3137 			ret_val = ixgbe_fc_autoneg_fiber(hw);
3138 		break;
3139 
3140 	/* Autoneg flow control on backplane adapters */
3141 	case ixgbe_media_type_backplane:
3142 		ret_val = ixgbe_fc_autoneg_backplane(hw);
3143 		break;
3144 
3145 	/* Autoneg flow control on copper adapters */
3146 	case ixgbe_media_type_copper:
3147 		if (ixgbe_device_supports_autoneg_fc(hw))
3148 			ret_val = ixgbe_fc_autoneg_copper(hw);
3149 		break;
3150 
3151 	default:
3152 		break;
3153 	}
3154 
3155 out:
3156 	if (ret_val == IXGBE_SUCCESS) {
3157 		hw->fc.fc_was_autonegged = true;
3158 	} else {
3159 		hw->fc.fc_was_autonegged = false;
3160 		hw->fc.current_mode = hw->fc.requested_mode;
3161 	}
3162 }
3163 
3164 /*
3165  * ixgbe_pcie_timeout_poll - Return number of times to poll for completion
3166  * @hw: pointer to hardware structure
3167  *
3168  * System-wide timeout range is encoded in PCIe Device Control2 register.
3169  *
3170  * Add 10% to specified maximum and return the number of times to poll for
3171  * completion timeout, in units of 100 microsec.  Never return less than
3172  * 800 = 80 millisec.
3173  */
3174 static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw)
3175 {
3176 	s16 devctl2;
3177 	u32 pollcnt;
3178 
3179 	devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
3180 	devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK;
3181 
3182 	switch (devctl2) {
3183 	case IXGBE_PCIDEVCTRL2_65_130ms:
3184 		pollcnt = 1300;		/* 130 millisec */
3185 		break;
3186 	case IXGBE_PCIDEVCTRL2_260_520ms:
3187 		pollcnt = 5200;		/* 520 millisec */
3188 		break;
3189 	case IXGBE_PCIDEVCTRL2_1_2s:
3190 		pollcnt = 20000;	/* 2 sec */
3191 		break;
3192 	case IXGBE_PCIDEVCTRL2_4_8s:
3193 		pollcnt = 80000;	/* 8 sec */
3194 		break;
3195 	case IXGBE_PCIDEVCTRL2_17_34s:
3196 		pollcnt = 34000;	/* 34 sec */
3197 		break;
3198 	case IXGBE_PCIDEVCTRL2_50_100us:	/* 100 microsecs */
3199 	case IXGBE_PCIDEVCTRL2_1_2ms:		/* 2 millisecs */
3200 	case IXGBE_PCIDEVCTRL2_16_32ms:		/* 32 millisec */
3201 	case IXGBE_PCIDEVCTRL2_16_32ms_def:	/* 32 millisec default */
3202 	default:
3203 		pollcnt = 800;		/* 80 millisec minimum */
3204 		break;
3205 	}
3206 
3207 	/* add 10% to spec maximum */
3208 	return (pollcnt * 11) / 10;
3209 }
3210 
3211 /**
3212  * ixgbe_disable_pcie_primary - Disable PCI-express primary access
3213  * @hw: pointer to hardware structure
3214  *
3215  * Disables PCI-Express primary access and verifies there are no pending
3216  * requests. IXGBE_ERR_PRIMARY_REQUESTS_PENDING is returned if primary disable
3217  * bit hasn't caused the primary requests to be disabled, else IXGBE_SUCCESS
3218  * is returned signifying primary requests disabled.
3219  **/
3220 s32 ixgbe_disable_pcie_primary(struct ixgbe_hw *hw)
3221 {
3222 	s32 status = IXGBE_SUCCESS;
3223 	u32 i, poll;
3224 	u16 value;
3225 
3226 	DEBUGFUNC("ixgbe_disable_pcie_primary");
3227 
3228 	/* Always set this bit to ensure any future transactions are blocked */
3229 	IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);
3230 
3231 	/* Exit if primary requests are blocked */
3232 	if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) ||
3233 	    IXGBE_REMOVED(hw->hw_addr))
3234 		goto out;
3235 
3236 	/* Poll for primary request bit to clear */
3237 	for (i = 0; i < IXGBE_PCI_PRIMARY_DISABLE_TIMEOUT; i++) {
3238 		usec_delay(100);
3239 		if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
3240 			goto out;
3241 	}
3242 
3243 	/*
3244 	 * Two consecutive resets are required via CTRL.RST per datasheet
3245 	 * 5.2.5.3.2 Primary Disable.  We set a flag to inform the reset routine
3246 	 * of this need. The first reset prevents new primary requests from
3247 	 * being issued by our device.  We then must wait 1usec or more for any
3248 	 * remaining completions from the PCIe bus to trickle in, and then reset
3249 	 * again to clear out any effects they may have had on our device.
3250 	 */
3251 	DEBUGOUT("GIO Primary Disable bit didn't clear - requesting resets\n");
3252 	hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
3253 
3254 	if (hw->mac.type >= ixgbe_mac_X550)
3255 		goto out;
3256 
3257 	/*
3258 	 * Before proceeding, make sure that the PCIe block does not have
3259 	 * transactions pending.
3260 	 */
3261 	poll = ixgbe_pcie_timeout_poll(hw);
3262 	for (i = 0; i < poll; i++) {
3263 		usec_delay(100);
3264 		value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS);
3265 		if (IXGBE_REMOVED(hw->hw_addr))
3266 			goto out;
3267 		if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
3268 			goto out;
3269 	}
3270 
3271 	ERROR_REPORT1(IXGBE_ERROR_POLLING,
3272 		     "PCIe transaction pending bit also did not clear.\n");
3273 	status = IXGBE_ERR_PRIMARY_REQUESTS_PENDING;
3274 
3275 out:
3276 	return status;
3277 }
3278 
3279 /**
3280  * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
3281  * @hw: pointer to hardware structure
3282  * @mask: Mask to specify which semaphore to acquire
3283  *
3284  * Acquires the SWFW semaphore through the GSSR register for the specified
3285  * function (CSR, PHY0, PHY1, EEPROM, Flash)
3286  **/
3287 s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask)
3288 {
3289 	u32 gssr = 0;
3290 	u32 swmask = mask;
3291 	u32 fwmask = mask << 5;
3292 	u32 timeout = 200;
3293 	u32 i;
3294 
3295 	DEBUGFUNC("ixgbe_acquire_swfw_sync");
3296 
3297 	for (i = 0; i < timeout; i++) {
3298 		/*
3299 		 * SW NVM semaphore bit is used for access to all
3300 		 * SW_FW_SYNC bits (not just NVM)
3301 		 */
3302 		if (ixgbe_get_eeprom_semaphore(hw))
3303 			return IXGBE_ERR_SWFW_SYNC;
3304 
3305 		gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
3306 		if (!(gssr & (fwmask | swmask))) {
3307 			gssr |= swmask;
3308 			IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
3309 			ixgbe_release_eeprom_semaphore(hw);
3310 			return IXGBE_SUCCESS;
3311 		} else {
3312 			/* Resource is currently in use by FW or SW */
3313 			ixgbe_release_eeprom_semaphore(hw);
3314 			msec_delay(5);
3315 		}
3316 	}
3317 
3318 	/* If time expired clear the bits holding the lock and retry */
3319 	if (gssr & (fwmask | swmask))
3320 		ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask));
3321 
3322 	msec_delay(5);
3323 	return IXGBE_ERR_SWFW_SYNC;
3324 }
3325 
3326 /**
3327  * ixgbe_release_swfw_sync - Release SWFW semaphore
3328  * @hw: pointer to hardware structure
3329  * @mask: Mask to specify which semaphore to release
3330  *
3331  * Releases the SWFW semaphore through the GSSR register for the specified
3332  * function (CSR, PHY0, PHY1, EEPROM, Flash)
3333  **/
3334 void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask)
3335 {
3336 	u32 gssr;
3337 	u32 swmask = mask;
3338 
3339 	DEBUGFUNC("ixgbe_release_swfw_sync");
3340 
3341 	ixgbe_get_eeprom_semaphore(hw);
3342 
3343 	gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
3344 	gssr &= ~swmask;
3345 	IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
3346 
3347 	ixgbe_release_eeprom_semaphore(hw);
3348 }
3349 
3350 /**
3351  * ixgbe_disable_sec_rx_path_generic - Stops the receive data path
3352  * @hw: pointer to hardware structure
3353  *
3354  * Stops the receive data path and waits for the HW to internally empty
3355  * the Rx security block
3356  **/
3357 s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw)
3358 {
3359 #define IXGBE_MAX_SECRX_POLL 4000
3360 
3361 	int i;
3362 	int secrxreg;
3363 
3364 	DEBUGFUNC("ixgbe_disable_sec_rx_path_generic");
3365 
3366 
3367 	secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
3368 	secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
3369 	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
3370 	for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
3371 		secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
3372 		if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
3373 			break;
3374 		else
3375 			/* Use interrupt-safe sleep just in case */
3376 			usec_delay(10);
3377 	}
3378 
3379 	/* For informational purposes only */
3380 	if (i >= IXGBE_MAX_SECRX_POLL)
3381 		DEBUGOUT("Rx unit being enabled before security "
3382 			 "path fully disabled.  Continuing with init.\n");
3383 
3384 	return IXGBE_SUCCESS;
3385 }
3386 
3387 /**
3388  * prot_autoc_read_generic - Hides MAC differences needed for AUTOC read
3389  * @hw: pointer to hardware structure
3390  * @locked: bool to indicate whether the SW/FW lock was taken
3391  * @reg_val: Value we read from AUTOC
3392  *
3393  * The default case requires no protection so just to the register read.
3394  */
3395 s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
3396 {
3397 	*locked = false;
3398 	*reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
3399 	return IXGBE_SUCCESS;
3400 }
3401 
3402 /**
3403  * prot_autoc_write_generic - Hides MAC differences needed for AUTOC write
3404  * @hw: pointer to hardware structure
3405  * @reg_val: value to write to AUTOC
3406  * @locked: bool to indicate whether the SW/FW lock was already taken by
3407  *          previous read.
3408  *
3409  * The default case requires no protection so just to the register write.
3410  */
3411 s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked)
3412 {
3413 	UNREFERENCED_1PARAMETER(locked);
3414 
3415 	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val);
3416 	return IXGBE_SUCCESS;
3417 }
3418 
3419 /**
3420  * ixgbe_enable_sec_rx_path_generic - Enables the receive data path
3421  * @hw: pointer to hardware structure
3422  *
3423  * Enables the receive data path.
3424  **/
3425 s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw)
3426 {
3427 	u32 secrxreg;
3428 
3429 	DEBUGFUNC("ixgbe_enable_sec_rx_path_generic");
3430 
3431 	secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
3432 	secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
3433 	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
3434 	IXGBE_WRITE_FLUSH(hw);
3435 
3436 	return IXGBE_SUCCESS;
3437 }
3438 
3439 /**
3440  * ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit
3441  * @hw: pointer to hardware structure
3442  * @regval: register value to write to RXCTRL
3443  *
3444  * Enables the Rx DMA unit
3445  **/
3446 s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
3447 {
3448 	DEBUGFUNC("ixgbe_enable_rx_dma_generic");
3449 
3450 	if (regval & IXGBE_RXCTRL_RXEN)
3451 		ixgbe_enable_rx(hw);
3452 	else
3453 		ixgbe_disable_rx(hw);
3454 
3455 	return IXGBE_SUCCESS;
3456 }
3457 
3458 /**
3459  * ixgbe_blink_led_start_generic - Blink LED based on index.
3460  * @hw: pointer to hardware structure
3461  * @index: led number to blink
3462  **/
3463 s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
3464 {
3465 	ixgbe_link_speed speed = 0;
3466 	bool link_up = 0;
3467 	u32 autoc_reg = 0;
3468 	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
3469 	s32 ret_val = IXGBE_SUCCESS;
3470 	bool locked = false;
3471 
3472 	DEBUGFUNC("ixgbe_blink_led_start_generic");
3473 
3474 	if (index > 3)
3475 		return IXGBE_ERR_PARAM;
3476 
3477 	/*
3478 	 * Link must be up to auto-blink the LEDs;
3479 	 * Force it if link is down.
3480 	 */
3481 	hw->mac.ops.check_link(hw, &speed, &link_up, false);
3482 
3483 	if (!link_up) {
3484 		ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
3485 		if (ret_val != IXGBE_SUCCESS)
3486 			goto out;
3487 
3488 		autoc_reg |= IXGBE_AUTOC_AN_RESTART;
3489 		autoc_reg |= IXGBE_AUTOC_FLU;
3490 
3491 		ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
3492 		if (ret_val != IXGBE_SUCCESS)
3493 			goto out;
3494 
3495 		IXGBE_WRITE_FLUSH(hw);
3496 		msec_delay(10);
3497 	}
3498 
3499 	led_reg &= ~IXGBE_LED_MODE_MASK(index);
3500 	led_reg |= IXGBE_LED_BLINK(index);
3501 	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
3502 	IXGBE_WRITE_FLUSH(hw);
3503 
3504 out:
3505 	return ret_val;
3506 }
3507 
3508 /**
3509  * ixgbe_blink_led_stop_generic - Stop blinking LED based on index.
3510  * @hw: pointer to hardware structure
3511  * @index: led number to stop blinking
3512  **/
3513 s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
3514 {
3515 	u32 autoc_reg = 0;
3516 	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
3517 	s32 ret_val = IXGBE_SUCCESS;
3518 	bool locked = false;
3519 
3520 	DEBUGFUNC("ixgbe_blink_led_stop_generic");
3521 
3522 	if (index > 3)
3523 		return IXGBE_ERR_PARAM;
3524 
3525 	ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
3526 	if (ret_val != IXGBE_SUCCESS)
3527 		goto out;
3528 
3529 	autoc_reg &= ~IXGBE_AUTOC_FLU;
3530 	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
3531 
3532 	ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
3533 	if (ret_val != IXGBE_SUCCESS)
3534 		goto out;
3535 
3536 	led_reg &= ~IXGBE_LED_MODE_MASK(index);
3537 	led_reg &= ~IXGBE_LED_BLINK(index);
3538 	led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
3539 	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
3540 	IXGBE_WRITE_FLUSH(hw);
3541 
3542 out:
3543 	return ret_val;
3544 }
3545 
3546 /**
3547  * ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM
3548  * @hw: pointer to hardware structure
3549  * @san_mac_offset: SAN MAC address offset
3550  *
3551  * This function will read the EEPROM location for the SAN MAC address
3552  * pointer, and returns the value at that location.  This is used in both
3553  * get and set mac_addr routines.
3554  **/
3555 static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
3556 					 u16 *san_mac_offset)
3557 {
3558 	s32 ret_val;
3559 
3560 	DEBUGFUNC("ixgbe_get_san_mac_addr_offset");
3561 
3562 	/*
3563 	 * First read the EEPROM pointer to see if the MAC addresses are
3564 	 * available.
3565 	 */
3566 	ret_val = hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR,
3567 				      san_mac_offset);
3568 	if (ret_val) {
3569 		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
3570 			      "eeprom at offset %d failed",
3571 			      IXGBE_SAN_MAC_ADDR_PTR);
3572 	}
3573 
3574 	return ret_val;
3575 }
3576 
3577 /**
3578  * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM
3579  * @hw: pointer to hardware structure
3580  * @san_mac_addr: SAN MAC address
3581  *
3582  * Reads the SAN MAC address from the EEPROM, if it's available.  This is
3583  * per-port, so set_lan_id() must be called before reading the addresses.
3584  * set_lan_id() is called by identify_sfp(), but this cannot be relied
3585  * upon for non-SFP connections, so we must call it here.
3586  **/
3587 s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
3588 {
3589 	u16 san_mac_data, san_mac_offset;
3590 	u8 i;
3591 	s32 ret_val;
3592 
3593 	DEBUGFUNC("ixgbe_get_san_mac_addr_generic");
3594 
3595 	/*
3596 	 * First read the EEPROM pointer to see if the MAC addresses are
3597 	 * available.  If they're not, no point in calling set_lan_id() here.
3598 	 */
3599 	ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
3600 	if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
3601 		goto san_mac_addr_out;
3602 
3603 	/* make sure we know which port we need to program */
3604 	hw->mac.ops.set_lan_id(hw);
3605 	/* apply the port offset to the address offset */
3606 	(hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
3607 			 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
3608 	for (i = 0; i < 3; i++) {
3609 		ret_val = hw->eeprom.ops.read(hw, san_mac_offset,
3610 					      &san_mac_data);
3611 		if (ret_val) {
3612 			ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
3613 				      "eeprom read at offset %d failed",
3614 				      san_mac_offset);
3615 			goto san_mac_addr_out;
3616 		}
3617 		san_mac_addr[i * 2] = (u8)(san_mac_data);
3618 		san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
3619 		san_mac_offset++;
3620 	}
3621 	return IXGBE_SUCCESS;
3622 
3623 san_mac_addr_out:
3624 	/*
3625 	 * No addresses available in this EEPROM.  It's not an
3626 	 * error though, so just wipe the local address and return.
3627 	 */
3628 	for (i = 0; i < 6; i++)
3629 		san_mac_addr[i] = 0xFF;
3630 	return IXGBE_SUCCESS;
3631 }
3632 
3633 /**
3634  * ixgbe_set_san_mac_addr_generic - Write the SAN MAC address to the EEPROM
3635  * @hw: pointer to hardware structure
3636  * @san_mac_addr: SAN MAC address
3637  *
3638  * Write a SAN MAC address to the EEPROM.
3639  **/
3640 s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
3641 {
3642 	s32 ret_val;
3643 	u16 san_mac_data, san_mac_offset;
3644 	u8 i;
3645 
3646 	DEBUGFUNC("ixgbe_set_san_mac_addr_generic");
3647 
3648 	/* Look for SAN mac address pointer.  If not defined, return */
3649 	ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
3650 	if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
3651 		return IXGBE_ERR_NO_SAN_ADDR_PTR;
3652 
3653 	/* Make sure we know which port we need to write */
3654 	hw->mac.ops.set_lan_id(hw);
3655 	/* Apply the port offset to the address offset */
3656 	(hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
3657 			 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
3658 
3659 	for (i = 0; i < 3; i++) {
3660 		san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8);
3661 		san_mac_data |= (u16)(san_mac_addr[i * 2]);
3662 		hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data);
3663 		san_mac_offset++;
3664 	}
3665 
3666 	return IXGBE_SUCCESS;
3667 }
3668 
3669 /**
3670  * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count
3671  * @hw: pointer to hardware structure
3672  *
3673  * Read PCIe configuration space, and get the MSI-X vector count from
3674  * the capabilities table.
3675  **/
3676 u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
3677 {
3678 	u16 msix_count = 1;
3679 	u16 max_msix_count;
3680 	u16 pcie_offset;
3681 
3682 	switch (hw->mac.type) {
3683 	case ixgbe_mac_82598EB:
3684 		pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS;
3685 		max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598;
3686 		break;
3687 	case ixgbe_mac_82599EB:
3688 	case ixgbe_mac_X540:
3689 	case ixgbe_mac_X550:
3690 	case ixgbe_mac_X550EM_x:
3691 	case ixgbe_mac_X550EM_a:
3692 		pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
3693 		max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
3694 		break;
3695 	default:
3696 		return msix_count;
3697 	}
3698 
3699 	DEBUGFUNC("ixgbe_get_pcie_msix_count_generic");
3700 	msix_count = IXGBE_READ_PCIE_WORD(hw, pcie_offset);
3701 	if (IXGBE_REMOVED(hw->hw_addr))
3702 		msix_count = 0;
3703 	msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
3704 
3705 	/* MSI-X count is zero-based in HW */
3706 	msix_count++;
3707 
3708 	if (msix_count > max_msix_count)
3709 		msix_count = max_msix_count;
3710 
3711 	return msix_count;
3712 }
3713 
3714 /**
3715  * ixgbe_insert_mac_addr_generic - Find a RAR for this mac address
3716  * @hw: pointer to hardware structure
3717  * @addr: Address to put into receive address register
3718  * @vmdq: VMDq pool to assign
3719  *
3720  * Puts an ethernet address into a receive address register, or
3721  * finds the rar that it is already in; adds to the pool list
3722  **/
3723 s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
3724 {
3725 	static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF;
3726 	u32 first_empty_rar = NO_EMPTY_RAR_FOUND;
3727 	u32 rar;
3728 	u32 rar_low, rar_high;
3729 	u32 addr_low, addr_high;
3730 
3731 	DEBUGFUNC("ixgbe_insert_mac_addr_generic");
3732 
3733 	/* swap bytes for HW little endian */
3734 	addr_low  = addr[0] | (addr[1] << 8)
3735 			    | (addr[2] << 16)
3736 			    | (addr[3] << 24);
3737 	addr_high = addr[4] | (addr[5] << 8);
3738 
3739 	/*
3740 	 * Either find the mac_id in rar or find the first empty space.
3741 	 * rar_highwater points to just after the highest currently used
3742 	 * rar in order to shorten the search.  It grows when we add a new
3743 	 * rar to the top.
3744 	 */
3745 	for (rar = 0; rar < hw->mac.rar_highwater; rar++) {
3746 		rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
3747 
3748 		if (((IXGBE_RAH_AV & rar_high) == 0)
3749 		    && first_empty_rar == NO_EMPTY_RAR_FOUND) {
3750 			first_empty_rar = rar;
3751 		} else if ((rar_high & 0xFFFF) == addr_high) {
3752 			rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar));
3753 			if (rar_low == addr_low)
3754 				break;    /* found it already in the rars */
3755 		}
3756 	}
3757 
3758 	if (rar < hw->mac.rar_highwater) {
3759 		/* already there so just add to the pool bits */
3760 		ixgbe_set_vmdq(hw, rar, vmdq);
3761 	} else if (first_empty_rar != NO_EMPTY_RAR_FOUND) {
3762 		/* stick it into first empty RAR slot we found */
3763 		rar = first_empty_rar;
3764 		ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
3765 	} else if (rar == hw->mac.rar_highwater) {
3766 		/* add it to the top of the list and inc the highwater mark */
3767 		ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
3768 		hw->mac.rar_highwater++;
3769 	} else if (rar >= hw->mac.num_rar_entries) {
3770 		return IXGBE_ERR_INVALID_MAC_ADDR;
3771 	}
3772 
3773 	/*
3774 	 * If we found rar[0], make sure the default pool bit (we use pool 0)
3775 	 * remains cleared to be sure default pool packets will get delivered
3776 	 */
3777 	if (rar == 0)
3778 		ixgbe_clear_vmdq(hw, rar, 0);
3779 
3780 	return rar;
3781 }
3782 
3783 /**
3784  * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
3785  * @hw: pointer to hardware struct
3786  * @rar: receive address register index to disassociate
3787  * @vmdq: VMDq pool index to remove from the rar
3788  **/
3789 s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
3790 {
3791 	u32 mpsar_lo, mpsar_hi;
3792 	u32 rar_entries = hw->mac.num_rar_entries;
3793 
3794 	DEBUGFUNC("ixgbe_clear_vmdq_generic");
3795 
3796 	/* Make sure we are using a valid rar index range */
3797 	if (rar >= rar_entries) {
3798 		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
3799 			     "RAR index %d is out of range.\n", rar);
3800 		return IXGBE_ERR_INVALID_ARGUMENT;
3801 	}
3802 
3803 	mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3804 	mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3805 
3806 	if (IXGBE_REMOVED(hw->hw_addr))
3807 		goto done;
3808 
3809 	if (!mpsar_lo && !mpsar_hi)
3810 		goto done;
3811 
3812 	if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
3813 		if (mpsar_lo) {
3814 			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
3815 			mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3816 		}
3817 		if (mpsar_hi) {
3818 			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
3819 			mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3820 		}
3821 	} else if (vmdq < 32) {
3822 		mpsar_lo &= ~(1 << vmdq);
3823 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
3824 	} else {
3825 		mpsar_hi &= ~(1 << (vmdq - 32));
3826 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
3827 	}
3828 
3829 	/* was that the last pool using this rar? */
3830 	if (mpsar_lo == 0 && mpsar_hi == 0 &&
3831 	    rar != 0 && rar != hw->mac.san_mac_rar_index)
3832 		hw->mac.ops.clear_rar(hw, rar);
3833 done:
3834 	return IXGBE_SUCCESS;
3835 }
3836 
3837 /**
3838  * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address
3839  * @hw: pointer to hardware struct
3840  * @rar: receive address register index to associate with a VMDq index
3841  * @vmdq: VMDq pool index
3842  **/
3843 s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
3844 {
3845 	u32 mpsar;
3846 	u32 rar_entries = hw->mac.num_rar_entries;
3847 
3848 	DEBUGFUNC("ixgbe_set_vmdq_generic");
3849 
3850 	/* Make sure we are using a valid rar index range */
3851 	if (rar >= rar_entries) {
3852 		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
3853 			     "RAR index %d is out of range.\n", rar);
3854 		return IXGBE_ERR_INVALID_ARGUMENT;
3855 	}
3856 
3857 	if (vmdq < 32) {
3858 		mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3859 		mpsar |= 1 << vmdq;
3860 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
3861 	} else {
3862 		mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3863 		mpsar |= 1 << (vmdq - 32);
3864 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
3865 	}
3866 	return IXGBE_SUCCESS;
3867 }
3868 
3869 /**
3870  * ixgbe_set_vmdq_san_mac_generic - Associate default VMDq pool index with
3871  * a rx address
3872  * @hw: pointer to hardware struct
3873  * @vmdq: VMDq pool index
3874  *
3875  * This function should only be involved in the IOV mode.
3876  * In IOV mode, Default pool is next pool after the number of
3877  * VFs advertized and not 0.
3878  * MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index]
3879  **/
3880 s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
3881 {
3882 	u32 rar = hw->mac.san_mac_rar_index;
3883 
3884 	DEBUGFUNC("ixgbe_set_vmdq_san_mac");
3885 
3886 	if (vmdq < 32) {
3887 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq);
3888 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
3889 	} else {
3890 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
3891 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32));
3892 	}
3893 
3894 	return IXGBE_SUCCESS;
3895 }
3896 
3897 /**
3898  * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
3899  * @hw: pointer to hardware structure
3900  **/
3901 s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
3902 {
3903 	int i;
3904 
3905 	DEBUGFUNC("ixgbe_init_uta_tables_generic");
3906 	DEBUGOUT(" Clearing UTA\n");
3907 
3908 	for (i = 0; i < 128; i++)
3909 		IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
3910 
3911 	return IXGBE_SUCCESS;
3912 }
3913 
3914 /**
3915  * ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
3916  * @hw: pointer to hardware structure
3917  * @vlan: VLAN id to write to VLAN filter
3918  * @vlvf_bypass: true to find vlanid only, false returns first empty slot if
3919  *		  vlanid not found
3920  *
3921  *
3922  * return the VLVF index where this VLAN id should be placed
3923  *
3924  **/
3925 s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass)
3926 {
3927 	s32 regindex, first_empty_slot;
3928 	u32 bits;
3929 
3930 	/* short cut the special case */
3931 	if (vlan == 0)
3932 		return 0;
3933 
3934 	/* if vlvf_bypass is set we don't want to use an empty slot, we
3935 	 * will simply bypass the VLVF if there are no entries present in the
3936 	 * VLVF that contain our VLAN
3937 	 */
3938 	first_empty_slot = vlvf_bypass ? IXGBE_ERR_NO_SPACE : 0;
3939 
3940 	/* add VLAN enable bit for comparison */
3941 	vlan |= IXGBE_VLVF_VIEN;
3942 
3943 	/* Search for the vlan id in the VLVF entries. Save off the first empty
3944 	 * slot found along the way.
3945 	 *
3946 	 * pre-decrement loop covering (IXGBE_VLVF_ENTRIES - 1) .. 1
3947 	 */
3948 	for (regindex = IXGBE_VLVF_ENTRIES; --regindex;) {
3949 		bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
3950 		if (bits == vlan)
3951 			return regindex;
3952 		if (!first_empty_slot && !bits)
3953 			first_empty_slot = regindex;
3954 	}
3955 
3956 	/* If we are here then we didn't find the VLAN.  Return first empty
3957 	 * slot we found during our search, else error.
3958 	 */
3959 	if (!first_empty_slot)
3960 		ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "No space in VLVF.\n");
3961 
3962 	return first_empty_slot ? first_empty_slot : IXGBE_ERR_NO_SPACE;
3963 }
3964 
3965 /**
3966  * ixgbe_set_vfta_generic - Set VLAN filter table
3967  * @hw: pointer to hardware structure
3968  * @vlan: VLAN id to write to VLAN filter
3969  * @vind: VMDq output index that maps queue to VLAN id in VLVFB
3970  * @vlan_on: boolean flag to turn on/off VLAN
3971  * @vlvf_bypass: boolean flag indicating updating default pool is okay
3972  *
3973  * Turn on/off specified VLAN in the VLAN filter table.
3974  **/
3975 s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
3976 			   bool vlan_on, bool vlvf_bypass)
3977 {
3978 	u32 regidx, vfta_delta, vfta;
3979 	s32 ret_val;
3980 
3981 	DEBUGFUNC("ixgbe_set_vfta_generic");
3982 
3983 	if (vlan > 4095 || vind > 63)
3984 		return IXGBE_ERR_PARAM;
3985 
3986 	/*
3987 	 * this is a 2 part operation - first the VFTA, then the
3988 	 * VLVF and VLVFB if VT Mode is set
3989 	 * We don't write the VFTA until we know the VLVF part succeeded.
3990 	 */
3991 
3992 	/* Part 1
3993 	 * The VFTA is a bitstring made up of 128 32-bit registers
3994 	 * that enable the particular VLAN id, much like the MTA:
3995 	 *    bits[11-5]: which register
3996 	 *    bits[4-0]:  which bit in the register
3997 	 */
3998 	regidx = vlan / 32;
3999 	vfta_delta = 1 << (vlan % 32);
4000 	vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regidx));
4001 
4002 	/*
4003 	 * vfta_delta represents the difference between the current value
4004 	 * of vfta and the value we want in the register.  Since the diff
4005 	 * is an XOR mask we can just update the vfta using an XOR
4006 	 */
4007 	vfta_delta &= vlan_on ? ~vfta : vfta;
4008 	vfta ^= vfta_delta;
4009 
4010 	/* Part 2
4011 	 * Call ixgbe_set_vlvf_generic to set VLVFB and VLVF
4012 	 */
4013 	ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on, &vfta_delta,
4014 					 vfta, vlvf_bypass);
4015 	if (ret_val != IXGBE_SUCCESS) {
4016 		if (vlvf_bypass)
4017 			goto vfta_update;
4018 		return ret_val;
4019 	}
4020 
4021 vfta_update:
4022 	/* Update VFTA now that we are ready for traffic */
4023 	if (vfta_delta)
4024 		IXGBE_WRITE_REG(hw, IXGBE_VFTA(regidx), vfta);
4025 
4026 	return IXGBE_SUCCESS;
4027 }
4028 
4029 /**
4030  * ixgbe_set_vlvf_generic - Set VLAN Pool Filter
4031  * @hw: pointer to hardware structure
4032  * @vlan: VLAN id to write to VLAN filter
4033  * @vind: VMDq output index that maps queue to VLAN id in VLVFB
4034  * @vlan_on: boolean flag to turn on/off VLAN in VLVF
4035  * @vfta_delta: pointer to the difference between the current value of VFTA
4036  *		 and the desired value
4037  * @vfta: the desired value of the VFTA
4038  * @vlvf_bypass: boolean flag indicating updating default pool is okay
4039  *
4040  * Turn on/off specified bit in VLVF table.
4041  **/
4042 s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
4043 			   bool vlan_on, u32 *vfta_delta, u32 vfta,
4044 			   bool vlvf_bypass)
4045 {
4046 	u32 bits;
4047 	s32 vlvf_index;
4048 
4049 	DEBUGFUNC("ixgbe_set_vlvf_generic");
4050 
4051 	if (vlan > 4095 || vind > 63)
4052 		return IXGBE_ERR_PARAM;
4053 
4054 	/* If VT Mode is set
4055 	 *   Either vlan_on
4056 	 *     make sure the vlan is in VLVF
4057 	 *     set the vind bit in the matching VLVFB
4058 	 *   Or !vlan_on
4059 	 *     clear the pool bit and possibly the vind
4060 	 */
4061 	if (!(IXGBE_READ_REG(hw, IXGBE_VT_CTL) & IXGBE_VT_CTL_VT_ENABLE))
4062 		return IXGBE_SUCCESS;
4063 
4064 	vlvf_index = ixgbe_find_vlvf_slot(hw, vlan, vlvf_bypass);
4065 	if (vlvf_index < 0)
4066 		return vlvf_index;
4067 
4068 	bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32));
4069 
4070 	/* set the pool bit */
4071 	bits |= 1 << (vind % 32);
4072 	if (vlan_on)
4073 		goto vlvf_update;
4074 
4075 	/* clear the pool bit */
4076 	bits ^= 1 << (vind % 32);
4077 
4078 	if (!bits &&
4079 	    !IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + 1 - vind / 32))) {
4080 		/* Clear VFTA first, then disable VLVF.  Otherwise
4081 		 * we run the risk of stray packets leaking into
4082 		 * the PF via the default pool
4083 		 */
4084 		if (*vfta_delta)
4085 			IXGBE_WRITE_REG(hw, IXGBE_VFTA(vlan / 32), vfta);
4086 
4087 		/* disable VLVF and clear remaining bit from pool */
4088 		IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
4089 		IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), 0);
4090 
4091 		return IXGBE_SUCCESS;
4092 	}
4093 
4094 	/* If there are still bits set in the VLVFB registers
4095 	 * for the VLAN ID indicated we need to see if the
4096 	 * caller is requesting that we clear the VFTA entry bit.
4097 	 * If the caller has requested that we clear the VFTA
4098 	 * entry bit but there are still pools/VFs using this VLAN
4099 	 * ID entry then ignore the request.  We're not worried
4100 	 * about the case where we're turning the VFTA VLAN ID
4101 	 * entry bit on, only when requested to turn it off as
4102 	 * there may be multiple pools and/or VFs using the
4103 	 * VLAN ID entry.  In that case we cannot clear the
4104 	 * VFTA bit until all pools/VFs using that VLAN ID have also
4105 	 * been cleared.  This will be indicated by "bits" being
4106 	 * zero.
4107 	 */
4108 	*vfta_delta = 0;
4109 
4110 vlvf_update:
4111 	/* record pool change and enable VLAN ID if not already enabled */
4112 	IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), bits);
4113 	IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), IXGBE_VLVF_VIEN | vlan);
4114 
4115 	return IXGBE_SUCCESS;
4116 }
4117 
4118 /**
4119  * ixgbe_clear_vfta_generic - Clear VLAN filter table
4120  * @hw: pointer to hardware structure
4121  *
4122  * Clears the VLAN filter table, and the VMDq index associated with the filter
4123  **/
4124 s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
4125 {
4126 	u32 offset;
4127 
4128 	DEBUGFUNC("ixgbe_clear_vfta_generic");
4129 
4130 	for (offset = 0; offset < hw->mac.vft_size; offset++)
4131 		IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
4132 
4133 	for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
4134 		IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
4135 		IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0);
4136 		IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2 + 1), 0);
4137 	}
4138 
4139 	return IXGBE_SUCCESS;
4140 }
4141 
4142 
4143 /**
4144  * ixgbe_toggle_txdctl_generic - Toggle VF's queues
4145  * @hw: pointer to hardware structure
4146  * @vf_number: VF index
4147  *
4148  * Enable and disable each queue in VF.
4149  */
4150 s32 ixgbe_toggle_txdctl_generic(struct ixgbe_hw *hw, u32 vf_number)
4151 {
4152 	u8  queue_count, i;
4153 	u32 offset, reg;
4154 
4155 	if (vf_number > 63)
4156 		return IXGBE_ERR_PARAM;
4157 
4158 	/*
4159 	 * Determine number of queues by checking
4160 	 * number of virtual functions
4161 	 */
4162 	reg = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
4163 	switch (reg & IXGBE_GCR_EXT_VT_MODE_MASK) {
4164 	case IXGBE_GCR_EXT_VT_MODE_64:
4165 		queue_count = 2;
4166 		break;
4167 	case IXGBE_GCR_EXT_VT_MODE_32:
4168 		queue_count = 4;
4169 		break;
4170 	case IXGBE_GCR_EXT_VT_MODE_16:
4171 		queue_count = 8;
4172 		break;
4173 	default:
4174 		return IXGBE_ERR_CONFIG;
4175 	}
4176 
4177 	/* Toggle queues */
4178 	for (i = 0; i < queue_count; ++i) {
4179 		/* Calculate offset of current queue */
4180 		offset = queue_count * vf_number + i;
4181 
4182 		/* Enable queue */
4183 		reg = IXGBE_READ_REG(hw, IXGBE_PVFTXDCTL(offset));
4184 		reg |= IXGBE_TXDCTL_ENABLE;
4185 		IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(offset), reg);
4186 		IXGBE_WRITE_FLUSH(hw);
4187 
4188 		/* Disable queue */
4189 		reg = IXGBE_READ_REG(hw, IXGBE_PVFTXDCTL(offset));
4190 		reg &= ~IXGBE_TXDCTL_ENABLE;
4191 		IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(offset), reg);
4192 		IXGBE_WRITE_FLUSH(hw);
4193 	}
4194 
4195 	return IXGBE_SUCCESS;
4196 }
4197 
4198 /**
4199  * ixgbe_need_crosstalk_fix - Determine if we need to do cross talk fix
4200  * @hw: pointer to hardware structure
4201  *
4202  * Contains the logic to identify if we need to verify link for the
4203  * crosstalk fix
4204  **/
4205 static bool ixgbe_need_crosstalk_fix(struct ixgbe_hw *hw)
4206 {
4207 
4208 	/* Does FW say we need the fix */
4209 	if (!hw->need_crosstalk_fix)
4210 		return false;
4211 
4212 	/* Only consider SFP+ PHYs i.e. media type fiber */
4213 	switch (hw->mac.ops.get_media_type(hw)) {
4214 	case ixgbe_media_type_fiber:
4215 	case ixgbe_media_type_fiber_qsfp:
4216 		break;
4217 	default:
4218 		return false;
4219 	}
4220 
4221 	return true;
4222 }
4223 
4224 /**
4225  * ixgbe_check_mac_link_generic - Determine link and speed status
4226  * @hw: pointer to hardware structure
4227  * @speed: pointer to link speed
4228  * @link_up: true when link is up
4229  * @link_up_wait_to_complete: bool used to wait for link up or not
4230  *
4231  * Reads the links register to determine if link is up and the current speed
4232  **/
4233 s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
4234 				 bool *link_up, bool link_up_wait_to_complete)
4235 {
4236 	u32 links_reg, links_orig;
4237 	u32 i;
4238 
4239 	DEBUGFUNC("ixgbe_check_mac_link_generic");
4240 
4241 	/* If Crosstalk fix enabled do the sanity check of making sure
4242 	 * the SFP+ cage is full.
4243 	 */
4244 	if (ixgbe_need_crosstalk_fix(hw)) {
4245 		u32 sfp_cage_full;
4246 
4247 		switch (hw->mac.type) {
4248 		case ixgbe_mac_82599EB:
4249 			sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
4250 					IXGBE_ESDP_SDP2;
4251 			break;
4252 		case ixgbe_mac_X550EM_x:
4253 		case ixgbe_mac_X550EM_a:
4254 			sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
4255 					IXGBE_ESDP_SDP0;
4256 			break;
4257 		default:
4258 			/* sanity check - No SFP+ devices here */
4259 			sfp_cage_full = false;
4260 			break;
4261 		}
4262 
4263 		if (!sfp_cage_full) {
4264 			*link_up = false;
4265 			*speed = IXGBE_LINK_SPEED_UNKNOWN;
4266 			return IXGBE_SUCCESS;
4267 		}
4268 	}
4269 
4270 	/* clear the old state */
4271 	links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);
4272 
4273 	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
4274 
4275 	if (links_orig != links_reg) {
4276 		DEBUGOUT2("LINKS changed from %08X to %08X\n",
4277 			  links_orig, links_reg);
4278 	}
4279 
4280 	if (link_up_wait_to_complete) {
4281 		for (i = 0; i < hw->mac.max_link_up_time; i++) {
4282 			if (links_reg & IXGBE_LINKS_UP) {
4283 				*link_up = true;
4284 				break;
4285 			} else {
4286 				*link_up = false;
4287 			}
4288 			msec_delay(100);
4289 			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
4290 		}
4291 	} else {
4292 		if (links_reg & IXGBE_LINKS_UP) {
4293 			if (ixgbe_need_crosstalk_fix(hw)) {
4294 				/* Check the link state again after a delay
4295 				 * to filter out spurious link up
4296 				 * notifications.
4297 				 */
4298 				msec_delay(5);
4299 				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
4300 				if (!(links_reg & IXGBE_LINKS_UP)) {
4301 					*link_up = false;
4302 					*speed = IXGBE_LINK_SPEED_UNKNOWN;
4303 					return IXGBE_SUCCESS;
4304 				}
4305 
4306 			}
4307 			*link_up = true;
4308 		} else {
4309 			*link_up = false;
4310 		}
4311 	}
4312 
4313 	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
4314 	case IXGBE_LINKS_SPEED_10G_82599:
4315 		*speed = IXGBE_LINK_SPEED_10GB_FULL;
4316 		if (hw->mac.type >= ixgbe_mac_X550) {
4317 			if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
4318 				*speed = IXGBE_LINK_SPEED_2_5GB_FULL;
4319 		}
4320 		break;
4321 	case IXGBE_LINKS_SPEED_1G_82599:
4322 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
4323 		break;
4324 	case IXGBE_LINKS_SPEED_100_82599:
4325 		*speed = IXGBE_LINK_SPEED_100_FULL;
4326 		if (hw->mac.type == ixgbe_mac_X550) {
4327 			if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
4328 				*speed = IXGBE_LINK_SPEED_5GB_FULL;
4329 		}
4330 		break;
4331 	case IXGBE_LINKS_SPEED_10_X550EM_A:
4332 		*speed = IXGBE_LINK_SPEED_UNKNOWN;
4333 		if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
4334 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)
4335 			*speed = IXGBE_LINK_SPEED_10_FULL;
4336 		break;
4337 	default:
4338 		*speed = IXGBE_LINK_SPEED_UNKNOWN;
4339 	}
4340 
4341 	return IXGBE_SUCCESS;
4342 }
4343 
4344 /**
4345  * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
4346  * the EEPROM
4347  * @hw: pointer to hardware structure
4348  * @wwnn_prefix: the alternative WWNN prefix
4349  * @wwpn_prefix: the alternative WWPN prefix
4350  *
4351  * This function will read the EEPROM from the alternative SAN MAC address
4352  * block to check the support for the alternative WWNN/WWPN prefix support.
4353  **/
4354 s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
4355 				 u16 *wwpn_prefix)
4356 {
4357 	u16 offset, caps;
4358 	u16 alt_san_mac_blk_offset;
4359 
4360 	DEBUGFUNC("ixgbe_get_wwn_prefix_generic");
4361 
4362 	/* clear output first */
4363 	*wwnn_prefix = 0xFFFF;
4364 	*wwpn_prefix = 0xFFFF;
4365 
4366 	/* check if alternative SAN MAC is supported */
4367 	offset = IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR;
4368 	if (hw->eeprom.ops.read(hw, offset, &alt_san_mac_blk_offset))
4369 		goto wwn_prefix_err;
4370 
4371 	if ((alt_san_mac_blk_offset == 0) ||
4372 	    (alt_san_mac_blk_offset == 0xFFFF))
4373 		goto wwn_prefix_out;
4374 
4375 	/* check capability in alternative san mac address block */
4376 	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
4377 	if (hw->eeprom.ops.read(hw, offset, &caps))
4378 		goto wwn_prefix_err;
4379 	if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
4380 		goto wwn_prefix_out;
4381 
4382 	/* get the corresponding prefix for WWNN/WWPN */
4383 	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
4384 	if (hw->eeprom.ops.read(hw, offset, wwnn_prefix)) {
4385 		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
4386 			      "eeprom read at offset %d failed", offset);
4387 	}
4388 
4389 	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
4390 	if (hw->eeprom.ops.read(hw, offset, wwpn_prefix))
4391 		goto wwn_prefix_err;
4392 
4393 wwn_prefix_out:
4394 	return IXGBE_SUCCESS;
4395 
4396 wwn_prefix_err:
4397 	ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
4398 		      "eeprom read at offset %d failed", offset);
4399 	return IXGBE_SUCCESS;
4400 }
4401 
4402 /**
4403  * ixgbe_get_fcoe_boot_status_generic - Get FCOE boot status from EEPROM
4404  * @hw: pointer to hardware structure
4405  * @bs: the fcoe boot status
4406  *
4407  * This function will read the FCOE boot status from the iSCSI FCOE block
4408  **/
4409 s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs)
4410 {
4411 	u16 offset, caps, flags;
4412 	s32 status;
4413 
4414 	DEBUGFUNC("ixgbe_get_fcoe_boot_status_generic");
4415 
4416 	/* clear output first */
4417 	*bs = ixgbe_fcoe_bootstatus_unavailable;
4418 
4419 	/* check if FCOE IBA block is present */
4420 	offset = IXGBE_FCOE_IBA_CAPS_BLK_PTR;
4421 	status = hw->eeprom.ops.read(hw, offset, &caps);
4422 	if (status != IXGBE_SUCCESS)
4423 		goto out;
4424 
4425 	if (!(caps & IXGBE_FCOE_IBA_CAPS_FCOE))
4426 		goto out;
4427 
4428 	/* check if iSCSI FCOE block is populated */
4429 	status = hw->eeprom.ops.read(hw, IXGBE_ISCSI_FCOE_BLK_PTR, &offset);
4430 	if (status != IXGBE_SUCCESS)
4431 		goto out;
4432 
4433 	if ((offset == 0) || (offset == 0xFFFF))
4434 		goto out;
4435 
4436 	/* read fcoe flags in iSCSI FCOE block */
4437 	offset = offset + IXGBE_ISCSI_FCOE_FLAGS_OFFSET;
4438 	status = hw->eeprom.ops.read(hw, offset, &flags);
4439 	if (status != IXGBE_SUCCESS)
4440 		goto out;
4441 
4442 	if (flags & IXGBE_ISCSI_FCOE_FLAGS_ENABLE)
4443 		*bs = ixgbe_fcoe_bootstatus_enabled;
4444 	else
4445 		*bs = ixgbe_fcoe_bootstatus_disabled;
4446 
4447 out:
4448 	return status;
4449 }
4450 
4451 /**
4452  * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
4453  * @hw: pointer to hardware structure
4454  * @enable: enable or disable switch for MAC anti-spoofing
4455  * @vf: Virtual Function pool - VF Pool to set for MAC anti-spoofing
4456  *
4457  **/
4458 void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
4459 {
4460 	int vf_target_reg = vf >> 3;
4461 	int vf_target_shift = vf % 8;
4462 	u32 pfvfspoof;
4463 
4464 	if (hw->mac.type == ixgbe_mac_82598EB)
4465 		return;
4466 
4467 	pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
4468 	if (enable)
4469 		pfvfspoof |= (1 << vf_target_shift);
4470 	else
4471 		pfvfspoof &= ~(1 << vf_target_shift);
4472 	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
4473 }
4474 
4475 /**
4476  * ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
4477  * @hw: pointer to hardware structure
4478  * @enable: enable or disable switch for VLAN anti-spoofing
4479  * @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
4480  *
4481  **/
4482 void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
4483 {
4484 	int vf_target_reg = vf >> 3;
4485 	int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT;
4486 	u32 pfvfspoof;
4487 
4488 	if (hw->mac.type == ixgbe_mac_82598EB)
4489 		return;
4490 
4491 	pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
4492 	if (enable)
4493 		pfvfspoof |= (1 << vf_target_shift);
4494 	else
4495 		pfvfspoof &= ~(1 << vf_target_shift);
4496 	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
4497 }
4498 
4499 /**
4500  * ixgbe_get_device_caps_generic - Get additional device capabilities
4501  * @hw: pointer to hardware structure
4502  * @device_caps: the EEPROM word with the extra device capabilities
4503  *
4504  * This function will read the EEPROM location for the device capabilities,
4505  * and return the word through device_caps.
4506  **/
4507 s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps)
4508 {
4509 	DEBUGFUNC("ixgbe_get_device_caps_generic");
4510 
4511 	hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
4512 
4513 	return IXGBE_SUCCESS;
4514 }
4515 
4516 /**
4517  * ixgbe_enable_relaxed_ordering_gen2 - Enable relaxed ordering
4518  * @hw: pointer to hardware structure
4519  *
4520  **/
4521 void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw)
4522 {
4523 	u32 regval;
4524 	u32 i;
4525 
4526 	DEBUGFUNC("ixgbe_enable_relaxed_ordering_gen2");
4527 
4528 	/* Enable relaxed ordering */
4529 	for (i = 0; i < hw->mac.max_tx_queues; i++) {
4530 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
4531 		regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
4532 		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
4533 	}
4534 
4535 	for (i = 0; i < hw->mac.max_rx_queues; i++) {
4536 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
4537 		regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
4538 			  IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
4539 		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
4540 	}
4541 
4542 }
4543 
4544 /**
4545  * ixgbe_calculate_checksum - Calculate checksum for buffer
4546  * @buffer: pointer to EEPROM
4547  * @length: size of EEPROM to calculate a checksum for
4548  * Calculates the checksum for some buffer on a specified length.  The
4549  * checksum calculated is returned.
4550  **/
4551 u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
4552 {
4553 	u32 i;
4554 	u8 sum = 0;
4555 
4556 	DEBUGFUNC("ixgbe_calculate_checksum");
4557 
4558 	if (!buffer)
4559 		return 0;
4560 
4561 	for (i = 0; i < length; i++)
4562 		sum += buffer[i];
4563 
4564 	return (u8) (0 - sum);
4565 }
4566 
4567 /**
4568  * ixgbe_hic_unlocked - Issue command to manageability block unlocked
4569  * @hw: pointer to the HW structure
4570  * @buffer: command to write and where the return status will be placed
4571  * @length: length of buffer, must be multiple of 4 bytes
4572  * @timeout: time in ms to wait for command completion
4573  *
4574  * Communicates with the manageability block. On success return IXGBE_SUCCESS
4575  * else returns semaphore error when encountering an error acquiring
4576  * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
4577  *
4578  * This function assumes that the IXGBE_GSSR_SW_MNG_SM semaphore is held
4579  * by the caller.
4580  **/
4581 s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length,
4582 		       u32 timeout)
4583 {
4584 	u32 hicr, i, fwsts;
4585 	u16 dword_len;
4586 
4587 	DEBUGFUNC("ixgbe_hic_unlocked");
4588 
4589 	if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
4590 		DEBUGOUT1("Buffer length failure buffersize=%d.\n", length);
4591 		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
4592 	}
4593 
4594 	/* Set bit 9 of FWSTS clearing FW reset indication */
4595 	fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS);
4596 	IXGBE_WRITE_REG(hw, IXGBE_FWSTS, fwsts | IXGBE_FWSTS_FWRI);
4597 
4598 	/* Check that the host interface is enabled. */
4599 	hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
4600 	if (!(hicr & IXGBE_HICR_EN)) {
4601 		DEBUGOUT("IXGBE_HOST_EN bit disabled.\n");
4602 		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
4603 	}
4604 
4605 	/* Calculate length in DWORDs. We must be DWORD aligned */
4606 	if (length % sizeof(u32)) {
4607 		DEBUGOUT("Buffer length failure, not aligned to dword");
4608 		return IXGBE_ERR_INVALID_ARGUMENT;
4609 	}
4610 
4611 	dword_len = length >> 2;
4612 
4613 	/* The device driver writes the relevant command block
4614 	 * into the ram area.
4615 	 */
4616 	for (i = 0; i < dword_len; i++)
4617 		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
4618 				      i, IXGBE_CPU_TO_LE32(buffer[i]));
4619 
4620 	/* Setting this bit tells the ARC that a new command is pending. */
4621 	IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);
4622 
4623 	for (i = 0; i < timeout; i++) {
4624 		hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
4625 		if (!(hicr & IXGBE_HICR_C))
4626 			break;
4627 		msec_delay(1);
4628 	}
4629 
4630 	/* For each command except "Apply Update" perform
4631 	 * status checks in the HICR registry.
4632 	 */
4633 	if ((buffer[0] & IXGBE_HOST_INTERFACE_MASK_CMD) ==
4634 	    IXGBE_HOST_INTERFACE_APPLY_UPDATE_CMD)
4635 		return IXGBE_SUCCESS;
4636 
4637 	/* Check command completion */
4638 	if ((timeout && i == timeout) ||
4639 	    !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) {
4640 		ERROR_REPORT1(IXGBE_ERROR_CAUTION,
4641 			      "Command has failed with no status valid.\n");
4642 		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
4643 	}
4644 
4645 	return IXGBE_SUCCESS;
4646 }
4647 
4648 /**
4649  * ixgbe_host_interface_command - Issue command to manageability block
4650  * @hw: pointer to the HW structure
4651  * @buffer: contains the command to write and where the return status will
4652  *  be placed
4653  * @length: length of buffer, must be multiple of 4 bytes
4654  * @timeout: time in ms to wait for command completion
4655  * @return_data: read and return data from the buffer (true) or not (false)
4656  *  Needed because FW structures are big endian and decoding of
4657  *  these fields can be 8 bit or 16 bit based on command. Decoding
4658  *  is not easily understood without making a table of commands.
4659  *  So we will leave this up to the caller to read back the data
4660  *  in these cases.
4661  *
4662  * Communicates with the manageability block. On success return IXGBE_SUCCESS
4663  * else returns semaphore error when encountering an error acquiring
4664  * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
4665  **/
4666 s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
4667 				 u32 length, u32 timeout, bool return_data)
4668 {
4669 	u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
4670 	struct ixgbe_hic_hdr *resp = (struct ixgbe_hic_hdr *)buffer;
4671 	u16 buf_len;
4672 	s32 status;
4673 	u32 bi;
4674 	u32 dword_len;
4675 
4676 	DEBUGFUNC("ixgbe_host_interface_command");
4677 
4678 	if (length == 0 || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
4679 		DEBUGOUT1("Buffer length failure buffersize=%d.\n", length);
4680 		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
4681 	}
4682 
4683 	/* Take management host interface semaphore */
4684 	status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
4685 	if (status)
4686 		return status;
4687 
4688 	status = ixgbe_hic_unlocked(hw, buffer, length, timeout);
4689 	if (status)
4690 		goto rel_out;
4691 
4692 	if (!return_data)
4693 		goto rel_out;
4694 
4695 	/* Calculate length in DWORDs */
4696 	dword_len = hdr_size >> 2;
4697 
4698 	/* first pull in the header so we know the buffer length */
4699 	for (bi = 0; bi < dword_len; bi++) {
4700 		buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
4701 		IXGBE_LE32_TO_CPUS(&buffer[bi]);
4702 	}
4703 
4704 	/*
4705 	 * If there is any thing in data position pull it in
4706 	 * Read Flash command requires reading buffer length from
4707 	 * two byes instead of one byte
4708 	 */
4709 	if (resp->cmd == IXGBE_HOST_INTERFACE_FLASH_READ_CMD ||
4710 	    resp->cmd == IXGBE_HOST_INTERFACE_SHADOW_RAM_READ_CMD) {
4711 		for (; bi < dword_len + 2; bi++) {
4712 			buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG,
4713 							  bi);
4714 			IXGBE_LE32_TO_CPUS(&buffer[bi]);
4715 		}
4716 		buf_len = (((u16)(resp->cmd_or_resp.ret_status) << 3)
4717 				  & 0xF00) | resp->buf_len;
4718 		hdr_size += (2 << 2);
4719 	} else {
4720 		buf_len = resp->buf_len;
4721 	}
4722 	if (!buf_len)
4723 		goto rel_out;
4724 
4725 	if (length < buf_len + hdr_size) {
4726 		DEBUGOUT("Buffer not large enough for reply message.\n");
4727 		status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4728 		goto rel_out;
4729 	}
4730 
4731 	/* Calculate length in DWORDs, add 3 for odd lengths */
4732 	dword_len = (buf_len + 3) >> 2;
4733 
4734 	/* Pull in the rest of the buffer (bi is where we left off) */
4735 	for (; bi <= dword_len; bi++) {
4736 		buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
4737 		IXGBE_LE32_TO_CPUS(&buffer[bi]);
4738 	}
4739 
4740 rel_out:
4741 	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
4742 
4743 	return status;
4744 }
4745 
4746 /**
4747  * ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware
4748  * @hw: pointer to the HW structure
4749  * @maj: driver version major number
4750  * @min: driver version minor number
4751  * @build: driver version build number
4752  * @sub: driver version sub build number
4753  * @len: unused
4754  * @driver_ver: unused
4755  *
4756  * Sends driver version number to firmware through the manageability
4757  * block.  On success return IXGBE_SUCCESS
4758  * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring
4759  * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
4760  **/
4761 s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
4762 				 u8 build, u8 sub, u16 len,
4763 				 const char *driver_ver)
4764 {
4765 	struct ixgbe_hic_drv_info fw_cmd;
4766 	int i;
4767 	s32 ret_val = IXGBE_SUCCESS;
4768 
4769 	DEBUGFUNC("ixgbe_set_fw_drv_ver_generic");
4770 	UNREFERENCED_2PARAMETER(len, driver_ver);
4771 
4772 	fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
4773 	fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
4774 	fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
4775 	fw_cmd.port_num = (u8)hw->bus.func;
4776 	fw_cmd.ver_maj = maj;
4777 	fw_cmd.ver_min = min;
4778 	fw_cmd.ver_build = build;
4779 	fw_cmd.ver_sub = sub;
4780 	fw_cmd.hdr.checksum = 0;
4781 	fw_cmd.pad = 0;
4782 	fw_cmd.pad2 = 0;
4783 	fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
4784 				(FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
4785 
4786 	for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
4787 		ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
4788 						       sizeof(fw_cmd),
4789 						       IXGBE_HI_COMMAND_TIMEOUT,
4790 						       true);
4791 		if (ret_val != IXGBE_SUCCESS)
4792 			continue;
4793 
4794 		if (fw_cmd.hdr.cmd_or_resp.ret_status ==
4795 		    FW_CEM_RESP_STATUS_SUCCESS)
4796 			ret_val = IXGBE_SUCCESS;
4797 		else
4798 			ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4799 
4800 		break;
4801 	}
4802 
4803 	return ret_val;
4804 }
4805 
4806 /**
4807  * ixgbe_set_rxpba_generic - Initialize Rx packet buffer
4808  * @hw: pointer to hardware structure
4809  * @num_pb: number of packet buffers to allocate
4810  * @headroom: reserve n KB of headroom
4811  * @strategy: packet buffer allocation strategy
4812  **/
4813 void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
4814 			     int strategy)
4815 {
4816 	u32 pbsize = hw->mac.rx_pb_size;
4817 	int i = 0;
4818 	u32 rxpktsize, txpktsize, txpbthresh;
4819 
4820 	/* Reserve headroom */
4821 	pbsize -= headroom;
4822 
4823 	if (!num_pb)
4824 		num_pb = 1;
4825 
4826 	/* Divide remaining packet buffer space amongst the number of packet
4827 	 * buffers requested using supplied strategy.
4828 	 */
4829 	switch (strategy) {
4830 	case PBA_STRATEGY_WEIGHTED:
4831 		/* ixgbe_dcb_pba_80_48 strategy weight first half of packet
4832 		 * buffer with 5/8 of the packet buffer space.
4833 		 */
4834 		rxpktsize = (pbsize * 5) / (num_pb * 4);
4835 		pbsize -= rxpktsize * (num_pb / 2);
4836 		rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
4837 		for (; i < (num_pb / 2); i++)
4838 			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
4839 		rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
4840 		for (; i < num_pb; i++)
4841 			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
4842 		break;
4843 	case PBA_STRATEGY_EQUAL:
4844 		rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
4845 		for (; i < num_pb; i++)
4846 			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
4847 		break;
4848 	default:
4849 		break;
4850 	}
4851 
4852 	/* Only support an equally distributed Tx packet buffer strategy. */
4853 	txpktsize = IXGBE_TXPBSIZE_MAX / num_pb;
4854 	txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
4855 	for (i = 0; i < num_pb; i++) {
4856 		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
4857 		IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
4858 	}
4859 
4860 	/* Clear unused TCs, if any, to zero buffer size*/
4861 	for (; i < IXGBE_MAX_PB; i++) {
4862 		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
4863 		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
4864 		IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
4865 	}
4866 }
4867 
4868 /**
4869  * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo
4870  * @hw: pointer to the hardware structure
4871  *
4872  * The 82599 and x540 MACs can experience issues if TX work is still pending
4873  * when a reset occurs.  This function prevents this by flushing the PCIe
4874  * buffers on the system.
4875  **/
4876 void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
4877 {
4878 	u32 gcr_ext, hlreg0, i, poll;
4879 	u16 value;
4880 
4881 	/*
4882 	 * If double reset is not requested then all transactions should
4883 	 * already be clear and as such there is no work to do
4884 	 */
4885 	if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED))
4886 		return;
4887 
4888 	/*
4889 	 * Set loopback enable to prevent any transmits from being sent
4890 	 * should the link come up.  This assumes that the RXCTRL.RXEN bit
4891 	 * has already been cleared.
4892 	 */
4893 	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4894 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);
4895 
4896 	/* Wait for a last completion before clearing buffers */
4897 	IXGBE_WRITE_FLUSH(hw);
4898 	msec_delay(3);
4899 
4900 	/*
4901 	 * Before proceeding, make sure that the PCIe block does not have
4902 	 * transactions pending.
4903 	 */
4904 	poll = ixgbe_pcie_timeout_poll(hw);
4905 	for (i = 0; i < poll; i++) {
4906 		usec_delay(100);
4907 		value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS);
4908 		if (IXGBE_REMOVED(hw->hw_addr))
4909 			goto out;
4910 		if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
4911 			goto out;
4912 	}
4913 
4914 out:
4915 	/* initiate cleaning flow for buffers in the PCIe transaction layer */
4916 	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
4917 	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
4918 			gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR);
4919 
4920 	/* Flush all writes and allow 20usec for all transactions to clear */
4921 	IXGBE_WRITE_FLUSH(hw);
4922 	usec_delay(20);
4923 
4924 	/* restore previous register values */
4925 	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
4926 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
4927 }
4928 
/* I2C register offsets of the EMC thermal sensor temperature-data
 * registers, indexed by the sensor-index field decoded from NVM ETS
 * records (see the thermal sensor functions below).
 */
static const u8 ixgbe_emc_temp_data[4] = {
	IXGBE_EMC_INTERNAL_DATA,
	IXGBE_EMC_DIODE1_DATA,
	IXGBE_EMC_DIODE2_DATA,
	IXGBE_EMC_DIODE3_DATA
};
/* I2C register offsets of the matching EMC thermal-limit registers,
 * indexed the same way as ixgbe_emc_temp_data above.
 */
static const u8 ixgbe_emc_therm_limit[4] = {
	IXGBE_EMC_INTERNAL_THERM_LIMIT,
	IXGBE_EMC_DIODE1_THERM_LIMIT,
	IXGBE_EMC_DIODE2_THERM_LIMIT,
	IXGBE_EMC_DIODE3_THERM_LIMIT
};
4941 
4942 /**
4943  * ixgbe_get_thermal_sensor_data_generic - Gathers thermal sensor data
4944  * @hw: pointer to hardware structure
4945  *
4946  * Returns the thermal sensor data structure
4947  **/
4948 s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw)
4949 {
4950 	s32 status = IXGBE_SUCCESS;
4951 	u16 ets_offset;
4952 	u16 ets_cfg;
4953 	u16 ets_sensor;
4954 	u8  num_sensors;
4955 	u8  sensor_index;
4956 	u8  sensor_location;
4957 	u8  i;
4958 	struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
4959 
4960 	DEBUGFUNC("ixgbe_get_thermal_sensor_data_generic");
4961 
4962 	/* Only support thermal sensors attached to 82599 physical port 0 */
4963 	if ((hw->mac.type != ixgbe_mac_82599EB) ||
4964 	    (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) {
4965 		status = IXGBE_NOT_IMPLEMENTED;
4966 		goto out;
4967 	}
4968 
4969 	status = hw->eeprom.ops.read(hw, IXGBE_ETS_CFG, &ets_offset);
4970 	if (status)
4971 		goto out;
4972 
4973 	if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) {
4974 		status = IXGBE_NOT_IMPLEMENTED;
4975 		goto out;
4976 	}
4977 
4978 	status = hw->eeprom.ops.read(hw, ets_offset, &ets_cfg);
4979 	if (status)
4980 		goto out;
4981 
4982 	if (((ets_cfg & IXGBE_ETS_TYPE_MASK) >> IXGBE_ETS_TYPE_SHIFT)
4983 		!= IXGBE_ETS_TYPE_EMC) {
4984 		status = IXGBE_NOT_IMPLEMENTED;
4985 		goto out;
4986 	}
4987 
4988 	num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);
4989 	if (num_sensors > IXGBE_MAX_SENSORS)
4990 		num_sensors = IXGBE_MAX_SENSORS;
4991 
4992 	for (i = 0; i < num_sensors; i++) {
4993 		status = hw->eeprom.ops.read(hw, (ets_offset + 1 + i),
4994 					     &ets_sensor);
4995 		if (status)
4996 			goto out;
4997 
4998 		sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
4999 				IXGBE_ETS_DATA_INDEX_SHIFT);
5000 		sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
5001 				   IXGBE_ETS_DATA_LOC_SHIFT);
5002 
5003 		if (sensor_location != 0) {
5004 			status = hw->phy.ops.read_i2c_byte(hw,
5005 					ixgbe_emc_temp_data[sensor_index],
5006 					IXGBE_I2C_THERMAL_SENSOR_ADDR,
5007 					&data->sensor[i].temp);
5008 			if (status)
5009 				goto out;
5010 		}
5011 	}
5012 out:
5013 	return status;
5014 }
5015 
5016 /**
5017  * ixgbe_init_thermal_sensor_thresh_generic - Inits thermal sensor thresholds
5018  * @hw: pointer to hardware structure
5019  *
5020  * Inits the thermal sensor thresholds according to the NVM map
5021  * and save off the threshold and location values into mac.thermal_sensor_data
5022  **/
5023 s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
5024 {
5025 	s32 status = IXGBE_SUCCESS;
5026 	u16 offset;
5027 	u16 ets_offset;
5028 	u16 ets_cfg;
5029 	u16 ets_sensor;
5030 	u8  low_thresh_delta;
5031 	u8  num_sensors;
5032 	u8  sensor_index;
5033 	u8  sensor_location;
5034 	u8  therm_limit;
5035 	u8  i;
5036 	struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
5037 
5038 	DEBUGFUNC("ixgbe_init_thermal_sensor_thresh_generic");
5039 
5040 	memset(data, 0, sizeof(struct ixgbe_thermal_sensor_data));
5041 
5042 	/* Only support thermal sensors attached to 82599 physical port 0 */
5043 	if ((hw->mac.type != ixgbe_mac_82599EB) ||
5044 	    (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1))
5045 		return IXGBE_NOT_IMPLEMENTED;
5046 
5047 	offset = IXGBE_ETS_CFG;
5048 	if (hw->eeprom.ops.read(hw, offset, &ets_offset))
5049 		goto eeprom_err;
5050 	if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
5051 		return IXGBE_NOT_IMPLEMENTED;
5052 
5053 	offset = ets_offset;
5054 	if (hw->eeprom.ops.read(hw, offset, &ets_cfg))
5055 		goto eeprom_err;
5056 	if (((ets_cfg & IXGBE_ETS_TYPE_MASK) >> IXGBE_ETS_TYPE_SHIFT)
5057 		!= IXGBE_ETS_TYPE_EMC)
5058 		return IXGBE_NOT_IMPLEMENTED;
5059 
5060 	low_thresh_delta = ((ets_cfg & IXGBE_ETS_LTHRES_DELTA_MASK) >>
5061 			     IXGBE_ETS_LTHRES_DELTA_SHIFT);
5062 	num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);
5063 
5064 	for (i = 0; i < num_sensors; i++) {
5065 		offset = ets_offset + 1 + i;
5066 		if (hw->eeprom.ops.read(hw, offset, &ets_sensor)) {
5067 			ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
5068 				      "eeprom read at offset %d failed",
5069 				      offset);
5070 			continue;
5071 		}
5072 		sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
5073 				IXGBE_ETS_DATA_INDEX_SHIFT);
5074 		sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
5075 				   IXGBE_ETS_DATA_LOC_SHIFT);
5076 		therm_limit = ets_sensor & IXGBE_ETS_DATA_HTHRESH_MASK;
5077 
5078 		hw->phy.ops.write_i2c_byte(hw,
5079 			ixgbe_emc_therm_limit[sensor_index],
5080 			IXGBE_I2C_THERMAL_SENSOR_ADDR, therm_limit);
5081 
5082 		if ((i < IXGBE_MAX_SENSORS) && (sensor_location != 0)) {
5083 			data->sensor[i].location = sensor_location;
5084 			data->sensor[i].caution_thresh = therm_limit;
5085 			data->sensor[i].max_op_thresh = therm_limit -
5086 							low_thresh_delta;
5087 		}
5088 	}
5089 	return status;
5090 
5091 eeprom_err:
5092 	ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
5093 		      "eeprom read at offset %d failed", offset);
5094 	return IXGBE_NOT_IMPLEMENTED;
5095 }
5096 
5097 /**
5098  * ixgbe_bypass_rw_generic - Bit bang data into by_pass FW
5099  *
5100  * @hw: pointer to hardware structure
5101  * @cmd: Command we send to the FW
5102  * @status: The reply from the FW
5103  *
5104  * Bit-bangs the cmd to the by_pass FW status points to what is returned.
5105  **/
5106 #define IXGBE_BYPASS_BB_WAIT 1
5107 s32 ixgbe_bypass_rw_generic(struct ixgbe_hw *hw, u32 cmd, u32 *status)
5108 {
5109 	int i;
5110 	u32 sck, sdi, sdo, dir_sck, dir_sdi, dir_sdo;
5111 	u32 esdp;
5112 
5113 	if (!status)
5114 		return IXGBE_ERR_PARAM;
5115 
5116 	*status = 0;
5117 
5118 	/* SDP vary by MAC type */
5119 	switch (hw->mac.type) {
5120 	case ixgbe_mac_82599EB:
5121 		sck = IXGBE_ESDP_SDP7;
5122 		sdi = IXGBE_ESDP_SDP0;
5123 		sdo = IXGBE_ESDP_SDP6;
5124 		dir_sck = IXGBE_ESDP_SDP7_DIR;
5125 		dir_sdi = IXGBE_ESDP_SDP0_DIR;
5126 		dir_sdo = IXGBE_ESDP_SDP6_DIR;
5127 		break;
5128 	case ixgbe_mac_X540:
5129 		sck = IXGBE_ESDP_SDP2;
5130 		sdi = IXGBE_ESDP_SDP0;
5131 		sdo = IXGBE_ESDP_SDP1;
5132 		dir_sck = IXGBE_ESDP_SDP2_DIR;
5133 		dir_sdi = IXGBE_ESDP_SDP0_DIR;
5134 		dir_sdo = IXGBE_ESDP_SDP1_DIR;
5135 		break;
5136 	default:
5137 		return IXGBE_ERR_DEVICE_NOT_SUPPORTED;
5138 	}
5139 
5140 	/* Set SDP pins direction */
5141 	esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
5142 	esdp |= dir_sck;	/* SCK as output */
5143 	esdp |= dir_sdi;	/* SDI as output */
5144 	esdp &= ~dir_sdo;	/* SDO as input */
5145 	esdp |= sck;
5146 	esdp |= sdi;
5147 	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
5148 	IXGBE_WRITE_FLUSH(hw);
5149 	msec_delay(IXGBE_BYPASS_BB_WAIT);
5150 
5151 	/* Generate start condition */
5152 	esdp &= ~sdi;
5153 	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
5154 	IXGBE_WRITE_FLUSH(hw);
5155 	msec_delay(IXGBE_BYPASS_BB_WAIT);
5156 
5157 	esdp &= ~sck;
5158 	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
5159 	IXGBE_WRITE_FLUSH(hw);
5160 	msec_delay(IXGBE_BYPASS_BB_WAIT);
5161 
5162 	/* Clock out the new control word and clock in the status */
5163 	for (i = 0; i < 32; i++) {
5164 		if ((cmd >> (31 - i)) & 0x01) {
5165 			esdp |= sdi;
5166 			IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
5167 		} else {
5168 			esdp &= ~sdi;
5169 			IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
5170 		}
5171 		IXGBE_WRITE_FLUSH(hw);
5172 		msec_delay(IXGBE_BYPASS_BB_WAIT);
5173 
5174 		esdp |= sck;
5175 		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
5176 		IXGBE_WRITE_FLUSH(hw);
5177 		msec_delay(IXGBE_BYPASS_BB_WAIT);
5178 
5179 		esdp &= ~sck;
5180 		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
5181 		IXGBE_WRITE_FLUSH(hw);
5182 		msec_delay(IXGBE_BYPASS_BB_WAIT);
5183 
5184 		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
5185 		if (esdp & sdo)
5186 			*status = (*status << 1) | 0x01;
5187 		else
5188 			*status = (*status << 1) | 0x00;
5189 		msec_delay(IXGBE_BYPASS_BB_WAIT);
5190 	}
5191 
5192 	/* stop condition */
5193 	esdp |= sck;
5194 	esdp &= ~sdi;
5195 	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
5196 	IXGBE_WRITE_FLUSH(hw);
5197 	msec_delay(IXGBE_BYPASS_BB_WAIT);
5198 
5199 	esdp |= sdi;
5200 	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
5201 	IXGBE_WRITE_FLUSH(hw);
5202 
5203 	/* set the page bits to match the cmd that the status it belongs to */
5204 	*status = (*status & 0x3fffffff) | (cmd & 0xc0000000);
5205 
5206 	return IXGBE_SUCCESS;
5207 }
5208 
5209 /**
5210  * ixgbe_bypass_valid_rd_generic - Verify valid return from bit-bang.
5211  * @in_reg: The register cmd for the bit-bang read.
5212  * @out_reg: The register returned from a bit-bang read.
5213  *
5214  * If we send a write we can't be sure it took until we can read back
5215  * that same register.  It can be a problem as some of the fields may
5216  * for valid reasons change inbetween the time wrote the register and
5217  * we read it again to verify.  So this function check everything we
5218  * can check and then assumes it worked.
5219  **/
5220 bool ixgbe_bypass_valid_rd_generic(u32 in_reg, u32 out_reg)
5221 {
5222 	u32 mask;
5223 
5224 	/* Page must match for all control pages */
5225 	if ((in_reg & BYPASS_PAGE_M) != (out_reg & BYPASS_PAGE_M))
5226 		return false;
5227 
5228 	switch (in_reg & BYPASS_PAGE_M) {
5229 	case BYPASS_PAGE_CTL0:
5230 		/* All the following can't change since the last write
5231 		 *  - All the event actions
5232 		 *  - The timeout value
5233 		 */
5234 		mask = BYPASS_AUX_ON_M | BYPASS_MAIN_ON_M |
5235 		       BYPASS_MAIN_OFF_M | BYPASS_AUX_OFF_M |
5236 		       BYPASS_WDTIMEOUT_M |
5237 		       BYPASS_WDT_VALUE_M;
5238 		if ((out_reg & mask) != (in_reg & mask))
5239 			return false;
5240 
5241 		/* 0x0 is never a valid value for bypass status */
5242 		if (!(out_reg & BYPASS_STATUS_OFF_M))
5243 			return false;
5244 		break;
5245 	case BYPASS_PAGE_CTL1:
5246 		/* All the following can't change since the last write
5247 		 *  - time valid bit
5248 		 *  - time we last sent
5249 		 */
5250 		mask = BYPASS_CTL1_VALID_M | BYPASS_CTL1_TIME_M;
5251 		if ((out_reg & mask) != (in_reg & mask))
5252 			return false;
5253 		break;
5254 	case BYPASS_PAGE_CTL2:
5255 		/* All we can check in this page is control number
5256 		 * which is already done above.
5257 		 */
5258 		break;
5259 	}
5260 
5261 	/* We are as sure as we can be return true */
5262 	return true;
5263 }
5264 
5265 /**
5266  * ixgbe_bypass_set_generic - Set a bypass field in the FW CTRL Regiter.
5267  *
5268  * @hw: pointer to hardware structure
5269  * @ctrl: The control word we are setting.
5270  * @event: The event we are setting in the FW.  This also happens to
5271  *	    be the mask for the event we are setting (handy)
5272  * @action: The action we set the event to in the FW. This is in a
5273  *	     bit field that happens to be what we want to put in
5274  *	     the event spot (also handy)
5275  **/
5276 s32 ixgbe_bypass_set_generic(struct ixgbe_hw *hw, u32 ctrl, u32 event,
5277 			     u32 action)
5278 {
5279 	u32 by_ctl = 0;
5280 	u32 cmd, verify;
5281 	u32 count = 0;
5282 
5283 	/* Get current values */
5284 	cmd = ctrl;	/* just reading only need control number */
5285 	if (ixgbe_bypass_rw_generic(hw, cmd, &by_ctl))
5286 		return IXGBE_ERR_INVALID_ARGUMENT;
5287 
5288 	/* Set to new action */
5289 	cmd = (by_ctl & ~event) | BYPASS_WE | action;
5290 	if (ixgbe_bypass_rw_generic(hw, cmd, &by_ctl))
5291 		return IXGBE_ERR_INVALID_ARGUMENT;
5292 
5293 	/* Page 0 force a FW eeprom write which is slow so verify */
5294 	if ((cmd & BYPASS_PAGE_M) == BYPASS_PAGE_CTL0) {
5295 		verify = BYPASS_PAGE_CTL0;
5296 		do {
5297 			if (count++ > 5)
5298 				return IXGBE_BYPASS_FW_WRITE_FAILURE;
5299 
5300 			if (ixgbe_bypass_rw_generic(hw, verify, &by_ctl))
5301 				return IXGBE_ERR_INVALID_ARGUMENT;
5302 		} while (!ixgbe_bypass_valid_rd_generic(cmd, by_ctl));
5303 	} else {
5304 		/* We have give the FW time for the write to stick */
5305 		msec_delay(100);
5306 	}
5307 
5308 	return IXGBE_SUCCESS;
5309 }
5310 
5311 /**
5312  * ixgbe_bypass_rd_eep_generic - Read the bypass FW eeprom addres.
5313  *
5314  * @hw: pointer to hardware structure
5315  * @addr: The bypass eeprom address to read.
5316  * @value: The 8b of data at the address above.
5317  **/
5318 s32 ixgbe_bypass_rd_eep_generic(struct ixgbe_hw *hw, u32 addr, u8 *value)
5319 {
5320 	u32 cmd;
5321 	u32 status;
5322 
5323 
5324 	/* send the request */
5325 	cmd = BYPASS_PAGE_CTL2 | BYPASS_WE;
5326 	cmd |= (addr << BYPASS_CTL2_OFFSET_SHIFT) & BYPASS_CTL2_OFFSET_M;
5327 	if (ixgbe_bypass_rw_generic(hw, cmd, &status))
5328 		return IXGBE_ERR_INVALID_ARGUMENT;
5329 
5330 	/* We have give the FW time for the write to stick */
5331 	msec_delay(100);
5332 
5333 	/* now read the results */
5334 	cmd &= ~BYPASS_WE;
5335 	if (ixgbe_bypass_rw_generic(hw, cmd, &status))
5336 		return IXGBE_ERR_INVALID_ARGUMENT;
5337 
5338 	*value = status & BYPASS_CTL2_DATA_M;
5339 
5340 	return IXGBE_SUCCESS;
5341 }
5342 
5343 /**
5344  * ixgbe_get_orom_version - Return option ROM from EEPROM
5345  *
5346  * @hw: pointer to hardware structure
5347  * @nvm_ver: pointer to output structure
5348  *
5349  * if valid option ROM version, nvm_ver->or_valid set to true
5350  * else nvm_ver->or_valid is false.
5351  **/
5352 void ixgbe_get_orom_version(struct ixgbe_hw *hw,
5353 			    struct ixgbe_nvm_version *nvm_ver)
5354 {
5355 	u16 offset, eeprom_cfg_blkh, eeprom_cfg_blkl;
5356 
5357 	nvm_ver->or_valid = false;
5358 	/* Option Rom may or may not be present.  Start with pointer */
5359 	hw->eeprom.ops.read(hw, NVM_OROM_OFFSET, &offset);
5360 
5361 	/* make sure offset is valid */
5362 	if ((offset == 0x0) || (offset == NVM_INVALID_PTR))
5363 		return;
5364 
5365 	hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_HI, &eeprom_cfg_blkh);
5366 	hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_LOW, &eeprom_cfg_blkl);
5367 
5368 	/* option rom exists and is valid */
5369 	if ((eeprom_cfg_blkl | eeprom_cfg_blkh) == 0x0 ||
5370 	    eeprom_cfg_blkl == NVM_VER_INVALID ||
5371 	    eeprom_cfg_blkh == NVM_VER_INVALID)
5372 		return;
5373 
5374 	nvm_ver->or_valid = true;
5375 	nvm_ver->or_major = eeprom_cfg_blkl >> NVM_OROM_SHIFT;
5376 	nvm_ver->or_build = (eeprom_cfg_blkl << NVM_OROM_SHIFT) |
5377 			    (eeprom_cfg_blkh >> NVM_OROM_SHIFT);
5378 	nvm_ver->or_patch = eeprom_cfg_blkh & NVM_OROM_PATCH_MASK;
5379 }
5380 
5381 /**
5382  * ixgbe_get_oem_prod_version - Return OEM Product version
5383  *
5384  * @hw: pointer to hardware structure
5385  * @nvm_ver: pointer to output structure
5386  *
5387  * if valid OEM product version, nvm_ver->oem_valid set to true
5388  * else nvm_ver->oem_valid is false.
5389  **/
5390 void ixgbe_get_oem_prod_version(struct ixgbe_hw *hw,
5391 				struct ixgbe_nvm_version *nvm_ver)
5392 {
5393 	u16 rel_num, prod_ver, mod_len, cap, offset;
5394 
5395 	nvm_ver->oem_valid = false;
5396 	hw->eeprom.ops.read(hw, NVM_OEM_PROD_VER_PTR, &offset);
5397 
5398 	/* Return if offset to OEM Product Version block is invalid */
5399 	if (offset == 0x0 || offset == NVM_INVALID_PTR)
5400 		return;
5401 
5402 	/* Read product version block */
5403 	hw->eeprom.ops.read(hw, offset, &mod_len);
5404 	hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_CAP_OFF, &cap);
5405 
5406 	/* Return if OEM product version block is invalid */
5407 	if (mod_len != NVM_OEM_PROD_VER_MOD_LEN ||
5408 	    (cap & NVM_OEM_PROD_VER_CAP_MASK) != 0x0)
5409 		return;
5410 
5411 	hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_L, &prod_ver);
5412 	hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_H, &rel_num);
5413 
5414 	/* Return if version is invalid */
5415 	if ((rel_num | prod_ver) == 0x0 ||
5416 	    rel_num == NVM_VER_INVALID || prod_ver == NVM_VER_INVALID)
5417 		return;
5418 
5419 	nvm_ver->oem_major = prod_ver >> NVM_VER_SHIFT;
5420 	nvm_ver->oem_minor = prod_ver & NVM_VER_MASK;
5421 	nvm_ver->oem_release = rel_num;
5422 	nvm_ver->oem_valid = true;
5423 }
5424 
5425 /**
5426  * ixgbe_get_etk_id - Return Etrack ID from EEPROM
5427  *
5428  * @hw: pointer to hardware structure
5429  * @nvm_ver: pointer to output structure
5430  *
5431  * word read errors will return 0xFFFF
5432  **/
5433 void ixgbe_get_etk_id(struct ixgbe_hw *hw, struct ixgbe_nvm_version *nvm_ver)
5434 {
5435 	u16 etk_id_l, etk_id_h;
5436 
5437 	if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_LOW, &etk_id_l))
5438 		etk_id_l = NVM_VER_INVALID;
5439 	if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_HI, &etk_id_h))
5440 		etk_id_h = NVM_VER_INVALID;
5441 
5442 	/* The word order for the version format is determined by high order
5443 	 * word bit 15.
5444 	 */
5445 	if ((etk_id_h & NVM_ETK_VALID) == 0) {
5446 		nvm_ver->etk_id = etk_id_h;
5447 		nvm_ver->etk_id |= (etk_id_l << NVM_ETK_SHIFT);
5448 	} else {
5449 		nvm_ver->etk_id = etk_id_l;
5450 		nvm_ver->etk_id |= (etk_id_h << NVM_ETK_SHIFT);
5451 	}
5452 }
5453 
5454 /**
5455  * ixgbe_get_nvm_version - Return version of NVM and its components
5456  *
5457  * @hw: pointer to hardware structure
5458  * @nvm_ver: pointer to output structure
5459  *
5460  * irrelevant component fields will return 0, read errors will return 0xff
5461  **/
5462 void ixgbe_get_nvm_version(struct ixgbe_hw *hw,
5463 			struct ixgbe_nvm_version *nvm_ver)
5464 {
5465 	u16 word, phy_ver;
5466 
5467 	DEBUGFUNC("ixgbe_get_nvm_version");
5468 
5469 	memset(nvm_ver, 0, sizeof(struct ixgbe_nvm_version));
5470 
5471 	/* eeprom version is mac-type specific */
5472 	switch (hw->mac.type) {
5473 	case ixgbe_mac_82598EB:
5474 		/* version of eeprom section */
5475 		if (ixgbe_read_eeprom(hw, NVM_EEP_OFFSET_82598, &word))
5476 			word = NVM_VER_INVALID;
5477 		nvm_ver->nvm_major = ((word & NVM_EEP_MAJOR_MASK)
5478 				      >> NVM_EEP_MAJ_SHIFT);
5479 		nvm_ver->nvm_minor = ((word & NVM_EEP_MINOR_MASK)
5480 				      >> NVM_EEP_MIN_SHIFT);
5481 		nvm_ver->nvm_id = (word & NVM_EEP_ID_MASK);
5482 		break;
5483 	case ixgbe_mac_X540:
5484 		/* version of eeprom section */
5485 		if (ixgbe_read_eeprom(hw, NVM_EEP_OFFSET_X540, &word))
5486 			word = NVM_VER_INVALID;
5487 		nvm_ver->nvm_major = ((word & NVM_EEP_MAJOR_MASK)
5488 				      >> NVM_EEP_MAJ_SHIFT);
5489 		nvm_ver->nvm_minor = ((word & NVM_EEP_MINOR_MASK)
5490 				      >> NVM_EEP_MIN_SHIFT);
5491 		nvm_ver->nvm_id = (word & NVM_EEP_ID_MASK);
5492 		break;
5493 
5494 	case ixgbe_mac_X550:
5495 	case ixgbe_mac_X550EM_x:
5496 	case ixgbe_mac_X550EM_a:
5497 		/* version of eeprom section */
5498 		if (ixgbe_read_eeprom(hw, NVM_EEP_OFFSET_X540, &word))
5499 			word = NVM_VER_INVALID;
5500 		nvm_ver->nvm_major = ((word & NVM_EEP_MAJOR_MASK)
5501 				      >> NVM_EEP_MAJ_SHIFT);
5502 		nvm_ver->nvm_minor = (word & NVM_EEP_X550_MINOR_MASK);
5503 
5504 		break;
5505 	default:
5506 		break;
5507 	}
5508 
5509 	/* phy version is mac-type specific */
5510 	switch (hw->mac.type) {
5511 	case ixgbe_mac_X540:
5512 	case ixgbe_mac_X550:
5513 	case ixgbe_mac_X550EM_x:
5514 	case ixgbe_mac_X550EM_a:
5515 		/* intel phy firmware version */
5516 		if (ixgbe_read_eeprom(hw, NVM_EEP_PHY_OFF_X540, &word))
5517 			word = NVM_VER_INVALID;
5518 		nvm_ver->phy_fw_maj = ((word & NVM_PHY_MAJOR_MASK)
5519 				       >> NVM_PHY_MAJ_SHIFT);
5520 		nvm_ver->phy_fw_min = ((word & NVM_PHY_MINOR_MASK)
5521 				       >> NVM_PHY_MIN_SHIFT);
5522 		nvm_ver->phy_fw_id = (word & NVM_PHY_ID_MASK);
5523 		break;
5524 	default:
5525 		break;
5526 	}
5527 
5528 	ixgbe_get_etk_id(hw, nvm_ver);
5529 
5530 	/* devstarter image */
5531 	if (ixgbe_read_eeprom(hw, NVM_DS_OFFSET, &word))
5532 		word = NVM_VER_INVALID;
5533 	nvm_ver->devstart_major = ((word & NVM_DS_MAJOR_MASK) >> NVM_DS_SHIFT);
5534 	nvm_ver->devstart_minor = (word & NVM_DS_MINOR_MASK);
5535 
5536 	/* OEM customization word */
5537 	if (ixgbe_read_eeprom(hw, NVM_OEM_OFFSET, &nvm_ver->oem_specific))
5538 		nvm_ver->oem_specific = NVM_VER_INVALID;
5539 
5540 	/* vendor (not intel) phy firmware version */
5541 	if (ixgbe_get_phy_firmware_version(hw, &phy_ver))
5542 		phy_ver = NVM_VER_INVALID;
5543 	nvm_ver->phy_vend_maj = ((phy_ver & NVM_PHYVEND_MAJOR_MASK)
5544 				 >> NVM_PHYVEND_SHIFT);
5545 	nvm_ver->phy_vend_min = (phy_ver & NVM_PHYVEND_MINOR_MASK);
5546 
5547 	/* Option Rom may or may not be present.  Start with pointer */
5548 	ixgbe_get_orom_version(hw, nvm_ver);
5549 	return;
5550 }
5551 
5552 /**
5553  * ixgbe_dcb_get_rtrup2tc_generic - read rtrup2tc reg
5554  * @hw: pointer to hardware structure
5555  * @map: pointer to u8 arr for returning map
5556  *
5557  * Read the rtrup2tc HW register and resolve its content into map
5558  **/
5559 void ixgbe_dcb_get_rtrup2tc_generic(struct ixgbe_hw *hw, u8 *map)
5560 {
5561 	u32 reg, i;
5562 
5563 	reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
5564 	for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
5565 		map[i] = IXGBE_RTRUP2TC_UP_MASK &
5566 			(reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT));
5567 	return;
5568 }
5569 
5570 void ixgbe_disable_rx_generic(struct ixgbe_hw *hw)
5571 {
5572 	u32 pfdtxgswc;
5573 	u32 rxctrl;
5574 
5575 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
5576 	if (rxctrl & IXGBE_RXCTRL_RXEN) {
5577 		if (hw->mac.type != ixgbe_mac_82598EB) {
5578 			pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
5579 			if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
5580 				pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
5581 				IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
5582 				hw->mac.set_lben = true;
5583 			} else {
5584 				hw->mac.set_lben = false;
5585 			}
5586 		}
5587 		rxctrl &= ~IXGBE_RXCTRL_RXEN;
5588 		IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
5589 	}
5590 }
5591 
5592 void ixgbe_enable_rx_generic(struct ixgbe_hw *hw)
5593 {
5594 	u32 pfdtxgswc;
5595 	u32 rxctrl;
5596 
5597 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
5598 	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, (rxctrl | IXGBE_RXCTRL_RXEN));
5599 
5600 	if (hw->mac.type != ixgbe_mac_82598EB) {
5601 		if (hw->mac.set_lben) {
5602 			pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
5603 			pfdtxgswc |= IXGBE_PFDTXGSWC_VT_LBEN;
5604 			IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
5605 			hw->mac.set_lben = false;
5606 		}
5607 	}
5608 }
5609 
5610 /**
5611  * ixgbe_mng_present - returns true when management capability is present
5612  * @hw: pointer to hardware structure
5613  */
5614 bool ixgbe_mng_present(struct ixgbe_hw *hw)
5615 {
5616 	u32 fwsm;
5617 
5618 	if (hw->mac.type < ixgbe_mac_82599EB)
5619 		return false;
5620 
5621 	fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw));
5622 
5623 	return !!(fwsm & IXGBE_FWSM_FW_MODE_PT);
5624 }
5625 
5626 /**
5627  * ixgbe_mng_enabled - Is the manageability engine enabled?
5628  * @hw: pointer to hardware structure
5629  *
5630  * Returns true if the manageability engine is enabled.
5631  **/
5632 bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
5633 {
5634 	u32 fwsm, manc, factps;
5635 
5636 	fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw));
5637 	if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT)
5638 		return false;
5639 
5640 	manc = IXGBE_READ_REG(hw, IXGBE_MANC);
5641 	if (!(manc & IXGBE_MANC_RCV_TCO_EN))
5642 		return false;
5643 
5644 	if (hw->mac.type <= ixgbe_mac_X540) {
5645 		factps = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw));
5646 		if (factps & IXGBE_FACTPS_MNGCG)
5647 			return false;
5648 	}
5649 
5650 	return true;
5651 }
5652 
5653 /**
5654  * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
5655  * @hw: pointer to hardware structure
5656  * @speed: new link speed
5657  * @autoneg_wait_to_complete: true when waiting for completion is needed
5658  *
5659  * Set the link speed in the MAC and/or PHY register and restarts link.
5660  **/
5661 s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
5662 					  ixgbe_link_speed speed,
5663 					  bool autoneg_wait_to_complete)
5664 {
5665 	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
5666 	ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
5667 	s32 status = IXGBE_SUCCESS;
5668 	u32 speedcnt = 0;
5669 	u32 i = 0;
5670 	bool autoneg, link_up = false;
5671 
5672 	DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");
5673 
5674 	/* Mask off requested but non-supported speeds */
5675 	status = ixgbe_get_link_capabilities(hw, &link_speed, &autoneg);
5676 	if (status != IXGBE_SUCCESS)
5677 		return status;
5678 
5679 	speed &= link_speed;
5680 
5681 	/* Try each speed one by one, highest priority first.  We do this in
5682 	 * software because 10Gb fiber doesn't support speed autonegotiation.
5683 	 */
5684 	if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
5685 		speedcnt++;
5686 		highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
5687 
5688 		/* Set the module link speed */
5689 		switch (hw->phy.media_type) {
5690 		case ixgbe_media_type_fiber_fixed:
5691 		case ixgbe_media_type_fiber:
5692 			ixgbe_set_rate_select_speed(hw,
5693 						    IXGBE_LINK_SPEED_10GB_FULL);
5694 			break;
5695 		case ixgbe_media_type_fiber_qsfp:
5696 			/* QSFP module automatically detects MAC link speed */
5697 			break;
5698 		default:
5699 			DEBUGOUT("Unexpected media type.\n");
5700 			break;
5701 		}
5702 
5703 		/* Allow module to change analog characteristics (1G->10G) */
5704 		msec_delay(40);
5705 
5706 		status = ixgbe_setup_mac_link(hw,
5707 					      IXGBE_LINK_SPEED_10GB_FULL,
5708 					      autoneg_wait_to_complete);
5709 		if (status != IXGBE_SUCCESS)
5710 			return status;
5711 
5712 		/* Flap the Tx laser if it has not already been done */
5713 		ixgbe_flap_tx_laser(hw);
5714 
5715 		/* Wait for the controller to acquire link.  Per IEEE 802.3ap,
5716 		 * Section 73.10.2, we may have to wait up to 1000ms if KR is
5717 		 * attempted.  82599 uses the same timing for 10g SFI.
5718 		 */
5719 		for (i = 0; i < 10; i++) {
5720 			/* Wait for the link partner to also set speed */
5721 			msec_delay(100);
5722 
5723 			/* If we have link, just jump out */
5724 			status = ixgbe_check_link(hw, &link_speed,
5725 						  &link_up, false);
5726 			if (status != IXGBE_SUCCESS)
5727 				return status;
5728 
5729 			if (link_up)
5730 				goto out;
5731 		}
5732 	}
5733 
5734 	if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
5735 		speedcnt++;
5736 		if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
5737 			highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
5738 
5739 		/* Set the module link speed */
5740 		switch (hw->phy.media_type) {
5741 		case ixgbe_media_type_fiber_fixed:
5742 		case ixgbe_media_type_fiber:
5743 			ixgbe_set_rate_select_speed(hw,
5744 						    IXGBE_LINK_SPEED_1GB_FULL);
5745 			break;
5746 		case ixgbe_media_type_fiber_qsfp:
5747 			/* QSFP module automatically detects link speed */
5748 			break;
5749 		default:
5750 			DEBUGOUT("Unexpected media type.\n");
5751 			break;
5752 		}
5753 
5754 		/* Allow module to change analog characteristics (10G->1G) */
5755 		msec_delay(40);
5756 
5757 		status = ixgbe_setup_mac_link(hw,
5758 					      IXGBE_LINK_SPEED_1GB_FULL,
5759 					      autoneg_wait_to_complete);
5760 		if (status != IXGBE_SUCCESS)
5761 			return status;
5762 
5763 		/* Flap the Tx laser if it has not already been done */
5764 		ixgbe_flap_tx_laser(hw);
5765 
5766 		/* Wait for the link partner to also set speed */
5767 		msec_delay(100);
5768 
5769 		/* If we have link, just jump out */
5770 		status = ixgbe_check_link(hw, &link_speed, &link_up, false);
5771 		if (status != IXGBE_SUCCESS)
5772 			return status;
5773 
5774 		if (link_up)
5775 			goto out;
5776 	}
5777 
5778 	/* We didn't get link.  Configure back to the highest speed we tried,
5779 	 * (if there was more than one).  We call ourselves back with just the
5780 	 * single highest speed that the user requested.
5781 	 */
5782 	if (speedcnt > 1)
5783 		status = ixgbe_setup_mac_link_multispeed_fiber(hw,
5784 						      highest_link_speed,
5785 						      autoneg_wait_to_complete);
5786 
5787 out:
5788 	/* Set autoneg_advertised value based on input link speed */
5789 	hw->phy.autoneg_advertised = 0;
5790 
5791 	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
5792 		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
5793 
5794 	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
5795 		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
5796 
5797 	return status;
5798 }
5799 
5800 /**
5801  * ixgbe_set_soft_rate_select_speed - Set module link speed
5802  * @hw: pointer to hardware structure
5803  * @speed: link speed to set
5804  *
5805  * Set module link speed via the soft rate select.
5806  */
5807 void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw,
5808 					ixgbe_link_speed speed)
5809 {
5810 	s32 status;
5811 	u8 rs, eeprom_data;
5812 
5813 	switch (speed) {
5814 	case IXGBE_LINK_SPEED_10GB_FULL:
5815 		/* one bit mask same as setting on */
5816 		rs = IXGBE_SFF_SOFT_RS_SELECT_10G;
5817 		break;
5818 	case IXGBE_LINK_SPEED_1GB_FULL:
5819 		rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
5820 		break;
5821 	default:
5822 		DEBUGOUT("Invalid fixed module speed\n");
5823 		return;
5824 	}
5825 
5826 	/* Set RS0 */
5827 	status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
5828 					   IXGBE_I2C_EEPROM_DEV_ADDR2,
5829 					   &eeprom_data);
5830 	if (status) {
5831 		DEBUGOUT("Failed to read Rx Rate Select RS0\n");
5832 		goto out;
5833 	}
5834 
5835 	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
5836 
5837 	status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
5838 					    IXGBE_I2C_EEPROM_DEV_ADDR2,
5839 					    eeprom_data);
5840 	if (status) {
5841 		DEBUGOUT("Failed to write Rx Rate Select RS0\n");
5842 		goto out;
5843 	}
5844 
5845 	/* Set RS1 */
5846 	status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
5847 					   IXGBE_I2C_EEPROM_DEV_ADDR2,
5848 					   &eeprom_data);
5849 	if (status) {
5850 		DEBUGOUT("Failed to read Rx Rate Select RS1\n");
5851 		goto out;
5852 	}
5853 
5854 	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
5855 
5856 	status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
5857 					    IXGBE_I2C_EEPROM_DEV_ADDR2,
5858 					    eeprom_data);
5859 	if (status) {
5860 		DEBUGOUT("Failed to write Rx Rate Select RS1\n");
5861 		goto out;
5862 	}
5863 out:
5864 	return;
5865 }
5866