xref: /freebsd/sys/dev/ixgbe/ixgbe_common.c (revision 0b57cec536236d46e3dba9bd041533462f33dbb7)
1 /******************************************************************************
2   SPDX-License-Identifier: BSD-3-Clause
3 
4   Copyright (c) 2001-2017, Intel Corporation
5   All rights reserved.
6 
7   Redistribution and use in source and binary forms, with or without
8   modification, are permitted provided that the following conditions are met:
9 
10    1. Redistributions of source code must retain the above copyright notice,
11       this list of conditions and the following disclaimer.
12 
13    2. Redistributions in binary form must reproduce the above copyright
14       notice, this list of conditions and the following disclaimer in the
15       documentation and/or other materials provided with the distribution.
16 
17    3. Neither the name of the Intel Corporation nor the names of its
18       contributors may be used to endorse or promote products derived from
19       this software without specific prior written permission.
20 
21   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31   POSSIBILITY OF SUCH DAMAGE.
32 
33 ******************************************************************************/
34 /*$FreeBSD$*/
35 
36 #include "ixgbe_common.h"
37 #include "ixgbe_phy.h"
38 #include "ixgbe_dcb.h"
39 #include "ixgbe_dcb_82599.h"
40 #include "ixgbe_api.h"
41 
42 static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
43 static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
44 static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
45 static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
46 static void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
47 static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
48 					u16 count);
49 static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
50 static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
51 static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
52 static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
53 
54 static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
55 static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
56 					 u16 *san_mac_offset);
57 static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
58 					     u16 words, u16 *data);
59 static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
60 					      u16 words, u16 *data);
61 static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
62 						 u16 offset);
63 
64 /**
65  *  ixgbe_init_ops_generic - Inits function ptrs
66  *  @hw: pointer to the hardware structure
67  *
68  *  Initialize the function pointers.
69  **/
70 s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
71 {
72 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
73 	struct ixgbe_mac_info *mac = &hw->mac;
74 	u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
75 
76 	DEBUGFUNC("ixgbe_init_ops_generic");
77 
78 	/* EEPROM */
79 	eeprom->ops.init_params = ixgbe_init_eeprom_params_generic;
80 	/* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */
81 	if (eec & IXGBE_EEC_PRES) {
82 		eeprom->ops.read = ixgbe_read_eerd_generic;
83 		eeprom->ops.read_buffer = ixgbe_read_eerd_buffer_generic;
84 	} else {
85 		eeprom->ops.read = ixgbe_read_eeprom_bit_bang_generic;
86 		eeprom->ops.read_buffer =
87 				 ixgbe_read_eeprom_buffer_bit_bang_generic;
88 	}
89 	eeprom->ops.write = ixgbe_write_eeprom_generic;
90 	eeprom->ops.write_buffer = ixgbe_write_eeprom_buffer_bit_bang_generic;
91 	eeprom->ops.validate_checksum =
92 				      ixgbe_validate_eeprom_checksum_generic;
93 	eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_generic;
94 	eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_generic;
95 
96 	/* MAC */
97 	mac->ops.init_hw = ixgbe_init_hw_generic;
98 	mac->ops.reset_hw = NULL;
99 	mac->ops.start_hw = ixgbe_start_hw_generic;
100 	mac->ops.clear_hw_cntrs = ixgbe_clear_hw_cntrs_generic;
101 	mac->ops.get_media_type = NULL;
102 	mac->ops.get_supported_physical_layer = NULL;
103 	mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_generic;
104 	mac->ops.get_mac_addr = ixgbe_get_mac_addr_generic;
105 	mac->ops.stop_adapter = ixgbe_stop_adapter_generic;
106 	mac->ops.get_bus_info = ixgbe_get_bus_info_generic;
107 	mac->ops.set_lan_id = ixgbe_set_lan_id_multi_port_pcie;
108 	mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync;
109 	mac->ops.release_swfw_sync = ixgbe_release_swfw_sync;
110 	mac->ops.prot_autoc_read = prot_autoc_read_generic;
111 	mac->ops.prot_autoc_write = prot_autoc_write_generic;
112 
113 	/* LEDs */
114 	mac->ops.led_on = ixgbe_led_on_generic;
115 	mac->ops.led_off = ixgbe_led_off_generic;
116 	mac->ops.blink_led_start = ixgbe_blink_led_start_generic;
117 	mac->ops.blink_led_stop = ixgbe_blink_led_stop_generic;
118 	mac->ops.init_led_link_act = ixgbe_init_led_link_act_generic;
119 
120 	/* RAR, Multicast, VLAN */
121 	mac->ops.set_rar = ixgbe_set_rar_generic;
122 	mac->ops.clear_rar = ixgbe_clear_rar_generic;
123 	mac->ops.insert_mac_addr = NULL;
124 	mac->ops.set_vmdq = NULL;
125 	mac->ops.clear_vmdq = NULL;
126 	mac->ops.init_rx_addrs = ixgbe_init_rx_addrs_generic;
127 	mac->ops.update_uc_addr_list = ixgbe_update_uc_addr_list_generic;
128 	mac->ops.update_mc_addr_list = ixgbe_update_mc_addr_list_generic;
129 	mac->ops.enable_mc = ixgbe_enable_mc_generic;
130 	mac->ops.disable_mc = ixgbe_disable_mc_generic;
131 	mac->ops.clear_vfta = NULL;
132 	mac->ops.set_vfta = NULL;
133 	mac->ops.set_vlvf = NULL;
134 	mac->ops.init_uta_tables = NULL;
135 	mac->ops.enable_rx = ixgbe_enable_rx_generic;
136 	mac->ops.disable_rx = ixgbe_disable_rx_generic;
137 
138 	/* Flow Control */
139 	mac->ops.fc_enable = ixgbe_fc_enable_generic;
140 	mac->ops.setup_fc = ixgbe_setup_fc_generic;
141 	mac->ops.fc_autoneg = ixgbe_fc_autoneg;
142 
143 	/* Link */
144 	mac->ops.get_link_capabilities = NULL;
145 	mac->ops.setup_link = NULL;
146 	mac->ops.check_link = NULL;
147 	mac->ops.dmac_config = NULL;
148 	mac->ops.dmac_update_tcs = NULL;
149 	mac->ops.dmac_config_tcs = NULL;
150 
151 	return IXGBE_SUCCESS;
152 }
153 
154 /**
155  * ixgbe_device_supports_autoneg_fc - Check if device supports autonegotiation
156  * of flow control
157  * @hw: pointer to hardware structure
158  *
159  * This function returns TRUE if the device supports flow control
160  * autonegotiation, and FALSE if it does not.
161  *
162  **/
163 bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
164 {
165 	bool supported = FALSE;
166 	ixgbe_link_speed speed;
167 	bool link_up;
168 
169 	DEBUGFUNC("ixgbe_device_supports_autoneg_fc");
170 
171 	switch (hw->phy.media_type) {
172 	case ixgbe_media_type_fiber_fixed:
173 	case ixgbe_media_type_fiber_qsfp:
174 	case ixgbe_media_type_fiber:
175 		/* flow control autoneg black list */
176 		switch (hw->device_id) {
177 		case IXGBE_DEV_ID_X550EM_A_SFP:
178 		case IXGBE_DEV_ID_X550EM_A_SFP_N:
179 		case IXGBE_DEV_ID_X550EM_A_QSFP:
180 		case IXGBE_DEV_ID_X550EM_A_QSFP_N:
181 			supported = FALSE;
182 			break;
183 		default:
184 			hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
185 			/* if link is down, assume supported */
186 			if (link_up)
187 				supported = speed == IXGBE_LINK_SPEED_1GB_FULL ?
188 				TRUE : FALSE;
189 			else
190 				supported = TRUE;
191 		}
192 
193 		break;
194 	case ixgbe_media_type_backplane:
195 		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_XFI)
196 			supported = FALSE;
197 		else
198 			supported = TRUE;
199 		break;
200 	case ixgbe_media_type_copper:
201 		/* only some copper devices support flow control autoneg */
202 		switch (hw->device_id) {
203 		case IXGBE_DEV_ID_82599_T3_LOM:
204 		case IXGBE_DEV_ID_X540T:
205 		case IXGBE_DEV_ID_X540T1:
206 		case IXGBE_DEV_ID_X540_BYPASS:
207 		case IXGBE_DEV_ID_X550T:
208 		case IXGBE_DEV_ID_X550T1:
209 		case IXGBE_DEV_ID_X550EM_X_10G_T:
210 		case IXGBE_DEV_ID_X550EM_A_10G_T:
211 		case IXGBE_DEV_ID_X550EM_A_1G_T:
212 		case IXGBE_DEV_ID_X550EM_A_1G_T_L:
213 			supported = TRUE;
214 			break;
215 		default:
216 			supported = FALSE;
217 		}
218 	default:
219 		break;
220 	}
221 
222 	if (!supported)
223 		ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
224 			      "Device %x does not support flow control autoneg",
225 			      hw->device_id);
226 
227 	return supported;
228 }
229 
/**
 *  ixgbe_setup_fc_generic - Set up flow control
 *  @hw: pointer to hardware structure
 *
 *  Called at init time to set up flow control.  Programs the pause
 *  advertisement bits in the register appropriate for the media type
 *  (AUTOC for backplane, PCS1GANA for fiber, the PHY's MDIO autoneg
 *  advertisement for copper) according to hw->fc.requested_mode, then
 *  restarts autonegotiation.
 *
 *  Returns IXGBE_SUCCESS, IXGBE_ERR_INVALID_LINK_SETTINGS when
 *  rx_pause is requested in strict IEEE mode, IXGBE_ERR_CONFIG for an
 *  unknown requested mode, or an error from prot_autoc_read/write.
 **/
s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_SUCCESS;
	/* reg: PCS1GANA image; reg_bp: AUTOC image; reg_cu: copper PHY advt */
	u32 reg = 0, reg_bp = 0;
	u16 reg_cu = 0;
	bool locked = FALSE;

	DEBUGFUNC("ixgbe_setup_fc_generic");

	/* Validate the requested mode */
	if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
		ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
			   "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
		goto out;
	}

	/*
	 * 10gig parts do not have a word in the EEPROM to determine the
	 * default flow control setting, so we explicitly set it to full.
	 */
	if (hw->fc.requested_mode == ixgbe_fc_default)
		hw->fc.requested_mode = ixgbe_fc_full;

	/*
	 * Set up the 1G and 10G flow control advertisement registers so the
	 * HW will be able to do fc autoneg once the cable is plugged in.  If
	 * we link at 10G, the 1G advertisement is harmless and vice versa.
	 */
	switch (hw->phy.media_type) {
	case ixgbe_media_type_backplane:
		/* some MAC's need RMW protection on AUTOC */
		ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &reg_bp);
		if (ret_val != IXGBE_SUCCESS)
			goto out;

		/* only backplane uses autoc */
		/* FALLTHROUGH */
	case ixgbe_media_type_fiber_fixed:
	case ixgbe_media_type_fiber_qsfp:
	case ixgbe_media_type_fiber:
		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);

		break;
	case ixgbe_media_type_copper:
		hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
				     IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg_cu);
		break;
	default:
		break;
	}

	/*
	 * The possible values of fc.requested_mode are:
	 * 0: Flow control is completely disabled
	 * 1: Rx flow control is enabled (we can receive pause frames,
	 *    but not send pause frames).
	 * 2: Tx flow control is enabled (we can send pause frames but
	 *    we do not support receiving pause frames).
	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
	 * other: Invalid.
	 */
	switch (hw->fc.requested_mode) {
	case ixgbe_fc_none:
		/* Flow control completely disabled by software override. */
		reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
		if (hw->phy.media_type == ixgbe_media_type_backplane)
			reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
				    IXGBE_AUTOC_ASM_PAUSE);
		else if (hw->phy.media_type == ixgbe_media_type_copper)
			reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
		break;
	case ixgbe_fc_tx_pause:
		/*
		 * Tx Flow control is enabled, and Rx Flow control is
		 * disabled by software override.
		 */
		reg |= IXGBE_PCS1GANA_ASM_PAUSE;
		reg &= ~IXGBE_PCS1GANA_SYM_PAUSE;
		if (hw->phy.media_type == ixgbe_media_type_backplane) {
			reg_bp |= IXGBE_AUTOC_ASM_PAUSE;
			reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE;
		} else if (hw->phy.media_type == ixgbe_media_type_copper) {
			reg_cu |= IXGBE_TAF_ASM_PAUSE;
			reg_cu &= ~IXGBE_TAF_SYM_PAUSE;
		}
		break;
	case ixgbe_fc_rx_pause:
		/*
		 * Rx Flow control is enabled and Tx Flow control is
		 * disabled by software override. Since there really
		 * isn't a way to advertise that we are capable of RX
		 * Pause ONLY, we will advertise that we support both
		 * symmetric and asymmetric Rx PAUSE, as such we fall
		 * through to the fc_full statement.  Later, we will
		 * disable the adapter's ability to send PAUSE frames.
		 */
	case ixgbe_fc_full:
		/* Flow control (both Rx and Tx) is enabled by SW override. */
		reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE;
		if (hw->phy.media_type == ixgbe_media_type_backplane)
			reg_bp |= IXGBE_AUTOC_SYM_PAUSE |
				  IXGBE_AUTOC_ASM_PAUSE;
		else if (hw->phy.media_type == ixgbe_media_type_copper)
			reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
		break;
	default:
		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
			     "Flow control param set incorrectly\n");
		ret_val = IXGBE_ERR_CONFIG;
		goto out;
		break;
	}

	/* Pre-X540 MACs advertise clause 37 FC through the 1G PCS block */
	if (hw->mac.type < ixgbe_mac_X540) {
		/*
		 * Enable auto-negotiation between the MAC & PHY;
		 * the MAC will advertise clause 37 flow control.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);

		/* Disable AN timeout */
		if (hw->fc.strict_ieee)
			reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;

		IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
		DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
	}

	/*
	 * AUTOC restart handles negotiation of 1G and 10G on backplane
	 * and copper. There is no need to set the PCS1GCTL register.
	 *
	 */
	if (hw->phy.media_type == ixgbe_media_type_backplane) {
		reg_bp |= IXGBE_AUTOC_AN_RESTART;
		ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked);
		if (ret_val)
			goto out;
	} else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
		    (ixgbe_device_supports_autoneg_fc(hw))) {
		hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu);
	}

	DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
out:
	return ret_val;
}
386 
387 /**
388  *  ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
389  *  @hw: pointer to hardware structure
390  *
391  *  Starts the hardware by filling the bus info structure and media type, clears
392  *  all on chip counters, initializes receive address registers, multicast
393  *  table, VLAN filter table, calls routine to set up link and flow control
394  *  settings, and leaves transmit and receive units disabled and uninitialized
395  **/
396 s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
397 {
398 	s32 ret_val;
399 	u32 ctrl_ext;
400 	u16 device_caps;
401 
402 	DEBUGFUNC("ixgbe_start_hw_generic");
403 
404 	/* Set the media type */
405 	hw->phy.media_type = hw->mac.ops.get_media_type(hw);
406 
407 	/* PHY ops initialization must be done in reset_hw() */
408 
409 	/* Clear the VLAN filter table */
410 	hw->mac.ops.clear_vfta(hw);
411 
412 	/* Clear statistics registers */
413 	hw->mac.ops.clear_hw_cntrs(hw);
414 
415 	/* Set No Snoop Disable */
416 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
417 	ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
418 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
419 	IXGBE_WRITE_FLUSH(hw);
420 
421 	/* Setup flow control */
422 	ret_val = ixgbe_setup_fc(hw);
423 	if (ret_val != IXGBE_SUCCESS && ret_val != IXGBE_NOT_IMPLEMENTED) {
424 		DEBUGOUT1("Flow control setup failed, returning %d\n", ret_val);
425 		return ret_val;
426 	}
427 
428 	/* Cache bit indicating need for crosstalk fix */
429 	switch (hw->mac.type) {
430 	case ixgbe_mac_82599EB:
431 	case ixgbe_mac_X550EM_x:
432 	case ixgbe_mac_X550EM_a:
433 		hw->mac.ops.get_device_caps(hw, &device_caps);
434 		if (device_caps & IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR)
435 			hw->need_crosstalk_fix = FALSE;
436 		else
437 			hw->need_crosstalk_fix = TRUE;
438 		break;
439 	default:
440 		hw->need_crosstalk_fix = FALSE;
441 		break;
442 	}
443 
444 	/* Clear adapter stopped flag */
445 	hw->adapter_stopped = FALSE;
446 
447 	return IXGBE_SUCCESS;
448 }
449 
450 /**
451  *  ixgbe_start_hw_gen2 - Init sequence for common device family
452  *  @hw: pointer to hw structure
453  *
454  * Performs the init sequence common to the second generation
455  * of 10 GbE devices.
456  * Devices in the second generation:
457  *     82599
458  *     X540
459  **/
460 s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
461 {
462 	u32 i;
463 	u32 regval;
464 
465 	/* Clear the rate limiters */
466 	for (i = 0; i < hw->mac.max_tx_queues; i++) {
467 		IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
468 		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
469 	}
470 	IXGBE_WRITE_FLUSH(hw);
471 
472 	/* Disable relaxed ordering */
473 	for (i = 0; i < hw->mac.max_tx_queues; i++) {
474 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
475 		regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
476 		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
477 	}
478 
479 	for (i = 0; i < hw->mac.max_rx_queues; i++) {
480 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
481 		regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
482 			    IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
483 		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
484 	}
485 
486 	return IXGBE_SUCCESS;
487 }
488 
489 /**
490  *  ixgbe_init_hw_generic - Generic hardware initialization
491  *  @hw: pointer to hardware structure
492  *
493  *  Initialize the hardware by resetting the hardware, filling the bus info
494  *  structure and media type, clears all on chip counters, initializes receive
495  *  address registers, multicast table, VLAN filter table, calls routine to set
496  *  up link and flow control settings, and leaves transmit and receive units
497  *  disabled and uninitialized
498  **/
499 s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
500 {
501 	s32 status;
502 
503 	DEBUGFUNC("ixgbe_init_hw_generic");
504 
505 	/* Reset the hardware */
506 	status = hw->mac.ops.reset_hw(hw);
507 
508 	if (status == IXGBE_SUCCESS || status == IXGBE_ERR_SFP_NOT_PRESENT) {
509 		/* Start the HW */
510 		status = hw->mac.ops.start_hw(hw);
511 	}
512 
513 	/* Initialize the LED link active for LED blink support */
514 	if (hw->mac.ops.init_led_link_act)
515 		hw->mac.ops.init_led_link_act(hw);
516 
517 	if (status != IXGBE_SUCCESS)
518 		DEBUGOUT1("Failed to initialize HW, STATUS = %d\n", status);
519 
520 	return status;
521 }
522 
/**
 *  ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
 *  @hw: pointer to hardware structure
 *
 *  Clears all hardware statistics counters by reading them from the hardware
 *  Statistics counters are clear on read.  The return values are
 *  deliberately discarded; the read itself resets each counter.
 **/
s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
{
	u16 i = 0;

	DEBUGFUNC("ixgbe_clear_hw_cntrs_generic");

	/* Error counters */
	IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	IXGBE_READ_REG(hw, IXGBE_ERRBC);
	IXGBE_READ_REG(hw, IXGBE_MSPDC);
	for (i = 0; i < 8; i++)
		IXGBE_READ_REG(hw, IXGBE_MPC(i));

	IXGBE_READ_REG(hw, IXGBE_MLFC);
	IXGBE_READ_REG(hw, IXGBE_MRFC);
	IXGBE_READ_REG(hw, IXGBE_RLEC);
	/* Link-level flow control counters; Rx registers moved on 82599+ */
	IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	if (hw->mac.type >= ixgbe_mac_82599EB) {
		IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	} else {
		IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
	}

	/* Per-priority (8 TC) flow control counters */
	for (i = 0; i < 8; i++) {
		IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
		IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
			IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
		} else {
			IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
			IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
		}
	}
	if (hw->mac.type >= ixgbe_mac_82599EB)
		for (i = 0; i < 8; i++)
			IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
	/* Rx packet-size histogram and good/broadcast/multicast counters */
	IXGBE_READ_REG(hw, IXGBE_PRC64);
	IXGBE_READ_REG(hw, IXGBE_PRC127);
	IXGBE_READ_REG(hw, IXGBE_PRC255);
	IXGBE_READ_REG(hw, IXGBE_PRC511);
	IXGBE_READ_REG(hw, IXGBE_PRC1023);
	IXGBE_READ_REG(hw, IXGBE_PRC1522);
	IXGBE_READ_REG(hw, IXGBE_GPRC);
	IXGBE_READ_REG(hw, IXGBE_BPRC);
	IXGBE_READ_REG(hw, IXGBE_MPRC);
	IXGBE_READ_REG(hw, IXGBE_GPTC);
	IXGBE_READ_REG(hw, IXGBE_GORCL);
	IXGBE_READ_REG(hw, IXGBE_GORCH);
	IXGBE_READ_REG(hw, IXGBE_GOTCL);
	IXGBE_READ_REG(hw, IXGBE_GOTCH);
	if (hw->mac.type == ixgbe_mac_82598EB)
		for (i = 0; i < 8; i++)
			IXGBE_READ_REG(hw, IXGBE_RNBC(i));
	IXGBE_READ_REG(hw, IXGBE_RUC);
	IXGBE_READ_REG(hw, IXGBE_RFC);
	IXGBE_READ_REG(hw, IXGBE_ROC);
	IXGBE_READ_REG(hw, IXGBE_RJC);
	IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	IXGBE_READ_REG(hw, IXGBE_TORL);
	IXGBE_READ_REG(hw, IXGBE_TORH);
	IXGBE_READ_REG(hw, IXGBE_TPR);
	IXGBE_READ_REG(hw, IXGBE_TPT);
	/* Tx packet-size histogram */
	IXGBE_READ_REG(hw, IXGBE_PTC64);
	IXGBE_READ_REG(hw, IXGBE_PTC127);
	IXGBE_READ_REG(hw, IXGBE_PTC255);
	IXGBE_READ_REG(hw, IXGBE_PTC511);
	IXGBE_READ_REG(hw, IXGBE_PTC1023);
	IXGBE_READ_REG(hw, IXGBE_PTC1522);
	IXGBE_READ_REG(hw, IXGBE_MPTC);
	IXGBE_READ_REG(hw, IXGBE_BPTC);
	/* Per-queue counters (16 queues) */
	for (i = 0; i < 16; i++) {
		IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
			IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		} else {
			IXGBE_READ_REG(hw, IXGBE_QBRC(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC(i));
		}
	}

	/* X540/X550 keep PCS error counters in the PHY; i is reused here
	 * only as a scratch word for the discarded read results. */
	if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) {
		if (hw->phy.id == 0)
			ixgbe_identify_phy(hw);
		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
		hw->phy.ops.read_reg(hw, IXGBE_LDPCECL,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
		hw->phy.ops.read_reg(hw, IXGBE_LDPCECH,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
	}

	return IXGBE_SUCCESS;
}
636 
637 /**
638  *  ixgbe_read_pba_string_generic - Reads part number string from EEPROM
639  *  @hw: pointer to hardware structure
640  *  @pba_num: stores the part number string from the EEPROM
641  *  @pba_num_size: part number string buffer length
642  *
643  *  Reads the part number string from the EEPROM.
644  **/
645 s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
646 				  u32 pba_num_size)
647 {
648 	s32 ret_val;
649 	u16 data;
650 	u16 pba_ptr;
651 	u16 offset;
652 	u16 length;
653 
654 	DEBUGFUNC("ixgbe_read_pba_string_generic");
655 
656 	if (pba_num == NULL) {
657 		DEBUGOUT("PBA string buffer was null\n");
658 		return IXGBE_ERR_INVALID_ARGUMENT;
659 	}
660 
661 	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
662 	if (ret_val) {
663 		DEBUGOUT("NVM Read Error\n");
664 		return ret_val;
665 	}
666 
667 	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
668 	if (ret_val) {
669 		DEBUGOUT("NVM Read Error\n");
670 		return ret_val;
671 	}
672 
673 	/*
674 	 * if data is not ptr guard the PBA must be in legacy format which
675 	 * means pba_ptr is actually our second data word for the PBA number
676 	 * and we can decode it into an ascii string
677 	 */
678 	if (data != IXGBE_PBANUM_PTR_GUARD) {
679 		DEBUGOUT("NVM PBA number is not stored as string\n");
680 
681 		/* we will need 11 characters to store the PBA */
682 		if (pba_num_size < 11) {
683 			DEBUGOUT("PBA string buffer too small\n");
684 			return IXGBE_ERR_NO_SPACE;
685 		}
686 
687 		/* extract hex string from data and pba_ptr */
688 		pba_num[0] = (data >> 12) & 0xF;
689 		pba_num[1] = (data >> 8) & 0xF;
690 		pba_num[2] = (data >> 4) & 0xF;
691 		pba_num[3] = data & 0xF;
692 		pba_num[4] = (pba_ptr >> 12) & 0xF;
693 		pba_num[5] = (pba_ptr >> 8) & 0xF;
694 		pba_num[6] = '-';
695 		pba_num[7] = 0;
696 		pba_num[8] = (pba_ptr >> 4) & 0xF;
697 		pba_num[9] = pba_ptr & 0xF;
698 
699 		/* put a null character on the end of our string */
700 		pba_num[10] = '\0';
701 
702 		/* switch all the data but the '-' to hex char */
703 		for (offset = 0; offset < 10; offset++) {
704 			if (pba_num[offset] < 0xA)
705 				pba_num[offset] += '0';
706 			else if (pba_num[offset] < 0x10)
707 				pba_num[offset] += 'A' - 0xA;
708 		}
709 
710 		return IXGBE_SUCCESS;
711 	}
712 
713 	ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
714 	if (ret_val) {
715 		DEBUGOUT("NVM Read Error\n");
716 		return ret_val;
717 	}
718 
719 	if (length == 0xFFFF || length == 0) {
720 		DEBUGOUT("NVM PBA number section invalid length\n");
721 		return IXGBE_ERR_PBA_SECTION;
722 	}
723 
724 	/* check if pba_num buffer is big enough */
725 	if (pba_num_size  < (((u32)length * 2) - 1)) {
726 		DEBUGOUT("PBA string buffer too small\n");
727 		return IXGBE_ERR_NO_SPACE;
728 	}
729 
730 	/* trim pba length from start of string */
731 	pba_ptr++;
732 	length--;
733 
734 	for (offset = 0; offset < length; offset++) {
735 		ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
736 		if (ret_val) {
737 			DEBUGOUT("NVM Read Error\n");
738 			return ret_val;
739 		}
740 		pba_num[offset * 2] = (u8)(data >> 8);
741 		pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
742 	}
743 	pba_num[offset * 2] = '\0';
744 
745 	return IXGBE_SUCCESS;
746 }
747 
748 /**
749  *  ixgbe_read_pba_num_generic - Reads part number from EEPROM
750  *  @hw: pointer to hardware structure
751  *  @pba_num: stores the part number from the EEPROM
752  *
753  *  Reads the part number from the EEPROM.
754  **/
755 s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num)
756 {
757 	s32 ret_val;
758 	u16 data;
759 
760 	DEBUGFUNC("ixgbe_read_pba_num_generic");
761 
762 	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
763 	if (ret_val) {
764 		DEBUGOUT("NVM Read Error\n");
765 		return ret_val;
766 	} else if (data == IXGBE_PBANUM_PTR_GUARD) {
767 		DEBUGOUT("NVM Not supported\n");
768 		return IXGBE_NOT_IMPLEMENTED;
769 	}
770 	*pba_num = (u32)(data << 16);
771 
772 	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data);
773 	if (ret_val) {
774 		DEBUGOUT("NVM Read Error\n");
775 		return ret_val;
776 	}
777 	*pba_num |= data;
778 
779 	return IXGBE_SUCCESS;
780 }
781 
782 /**
783  *  ixgbe_read_pba_raw
784  *  @hw: pointer to the HW structure
785  *  @eeprom_buf: optional pointer to EEPROM image
786  *  @eeprom_buf_size: size of EEPROM image in words
787  *  @max_pba_block_size: PBA block size limit
788  *  @pba: pointer to output PBA structure
789  *
790  *  Reads PBA from EEPROM image when eeprom_buf is not NULL.
791  *  Reads PBA from physical EEPROM device when eeprom_buf is NULL.
792  *
793  **/
794 s32 ixgbe_read_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
795 		       u32 eeprom_buf_size, u16 max_pba_block_size,
796 		       struct ixgbe_pba *pba)
797 {
798 	s32 ret_val;
799 	u16 pba_block_size;
800 
801 	if (pba == NULL)
802 		return IXGBE_ERR_PARAM;
803 
804 	if (eeprom_buf == NULL) {
805 		ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
806 						     &pba->word[0]);
807 		if (ret_val)
808 			return ret_val;
809 	} else {
810 		if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
811 			pba->word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
812 			pba->word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
813 		} else {
814 			return IXGBE_ERR_PARAM;
815 		}
816 	}
817 
818 	if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
819 		if (pba->pba_block == NULL)
820 			return IXGBE_ERR_PARAM;
821 
822 		ret_val = ixgbe_get_pba_block_size(hw, eeprom_buf,
823 						   eeprom_buf_size,
824 						   &pba_block_size);
825 		if (ret_val)
826 			return ret_val;
827 
828 		if (pba_block_size > max_pba_block_size)
829 			return IXGBE_ERR_PARAM;
830 
831 		if (eeprom_buf == NULL) {
832 			ret_val = hw->eeprom.ops.read_buffer(hw, pba->word[1],
833 							     pba_block_size,
834 							     pba->pba_block);
835 			if (ret_val)
836 				return ret_val;
837 		} else {
838 			if (eeprom_buf_size > (u32)(pba->word[1] +
839 					      pba_block_size)) {
840 				memcpy(pba->pba_block,
841 				       &eeprom_buf[pba->word[1]],
842 				       pba_block_size * sizeof(u16));
843 			} else {
844 				return IXGBE_ERR_PARAM;
845 			}
846 		}
847 	}
848 
849 	return IXGBE_SUCCESS;
850 }
851 
852 /**
853  *  ixgbe_write_pba_raw
854  *  @hw: pointer to the HW structure
855  *  @eeprom_buf: optional pointer to EEPROM image
856  *  @eeprom_buf_size: size of EEPROM image in words
857  *  @pba: pointer to PBA structure
858  *
859  *  Writes PBA to EEPROM image when eeprom_buf is not NULL.
860  *  Writes PBA to physical EEPROM device when eeprom_buf is NULL.
861  *
862  **/
863 s32 ixgbe_write_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
864 			u32 eeprom_buf_size, struct ixgbe_pba *pba)
865 {
866 	s32 ret_val;
867 
868 	if (pba == NULL)
869 		return IXGBE_ERR_PARAM;
870 
871 	if (eeprom_buf == NULL) {
872 		ret_val = hw->eeprom.ops.write_buffer(hw, IXGBE_PBANUM0_PTR, 2,
873 						      &pba->word[0]);
874 		if (ret_val)
875 			return ret_val;
876 	} else {
877 		if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
878 			eeprom_buf[IXGBE_PBANUM0_PTR] = pba->word[0];
879 			eeprom_buf[IXGBE_PBANUM1_PTR] = pba->word[1];
880 		} else {
881 			return IXGBE_ERR_PARAM;
882 		}
883 	}
884 
885 	if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
886 		if (pba->pba_block == NULL)
887 			return IXGBE_ERR_PARAM;
888 
889 		if (eeprom_buf == NULL) {
890 			ret_val = hw->eeprom.ops.write_buffer(hw, pba->word[1],
891 							      pba->pba_block[0],
892 							      pba->pba_block);
893 			if (ret_val)
894 				return ret_val;
895 		} else {
896 			if (eeprom_buf_size > (u32)(pba->word[1] +
897 					      pba->pba_block[0])) {
898 				memcpy(&eeprom_buf[pba->word[1]],
899 				       pba->pba_block,
900 				       pba->pba_block[0] * sizeof(u16));
901 			} else {
902 				return IXGBE_ERR_PARAM;
903 			}
904 		}
905 	}
906 
907 	return IXGBE_SUCCESS;
908 }
909 
910 /**
911  *  ixgbe_get_pba_block_size
912  *  @hw: pointer to the HW structure
913  *  @eeprom_buf: optional pointer to EEPROM image
914  *  @eeprom_buf_size: size of EEPROM image in words
915  *  @pba_data_size: pointer to output variable
916  *
917  *  Returns the size of the PBA block in words. Function operates on EEPROM
918  *  image if the eeprom_buf pointer is not NULL otherwise it accesses physical
919  *  EEPROM device.
920  *
921  **/
922 s32 ixgbe_get_pba_block_size(struct ixgbe_hw *hw, u16 *eeprom_buf,
923 			     u32 eeprom_buf_size, u16 *pba_block_size)
924 {
925 	s32 ret_val;
926 	u16 pba_word[2];
927 	u16 length;
928 
929 	DEBUGFUNC("ixgbe_get_pba_block_size");
930 
931 	if (eeprom_buf == NULL) {
932 		ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
933 						     &pba_word[0]);
934 		if (ret_val)
935 			return ret_val;
936 	} else {
937 		if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
938 			pba_word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
939 			pba_word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
940 		} else {
941 			return IXGBE_ERR_PARAM;
942 		}
943 	}
944 
945 	if (pba_word[0] == IXGBE_PBANUM_PTR_GUARD) {
946 		if (eeprom_buf == NULL) {
947 			ret_val = hw->eeprom.ops.read(hw, pba_word[1] + 0,
948 						      &length);
949 			if (ret_val)
950 				return ret_val;
951 		} else {
952 			if (eeprom_buf_size > pba_word[1])
953 				length = eeprom_buf[pba_word[1] + 0];
954 			else
955 				return IXGBE_ERR_PARAM;
956 		}
957 
958 		if (length == 0xFFFF || length == 0)
959 			return IXGBE_ERR_PBA_SECTION;
960 	} else {
961 		/* PBA number in legacy format, there is no PBA Block. */
962 		length = 0;
963 	}
964 
965 	if (pba_block_size != NULL)
966 		*pba_block_size = length;
967 
968 	return IXGBE_SUCCESS;
969 }
970 
971 /**
972  *  ixgbe_get_mac_addr_generic - Generic get MAC address
973  *  @hw: pointer to hardware structure
974  *  @mac_addr: Adapter MAC address
975  *
976  *  Reads the adapter's MAC address from first Receive Address Register (RAR0)
977  *  A reset of the adapter must be performed prior to calling this function
978  *  in order for the MAC address to have been loaded from the EEPROM into RAR0
979  **/
980 s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
981 {
982 	u32 rar_high;
983 	u32 rar_low;
984 	u16 i;
985 
986 	DEBUGFUNC("ixgbe_get_mac_addr_generic");
987 
988 	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
989 	rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));
990 
991 	for (i = 0; i < 4; i++)
992 		mac_addr[i] = (u8)(rar_low >> (i*8));
993 
994 	for (i = 0; i < 2; i++)
995 		mac_addr[i+4] = (u8)(rar_high >> (i*8));
996 
997 	return IXGBE_SUCCESS;
998 }
999 
1000 /**
1001  *  ixgbe_set_pci_config_data_generic - Generic store PCI bus info
1002  *  @hw: pointer to hardware structure
1003  *  @link_status: the link status returned by the PCI config space
1004  *
1005  *  Stores the PCI bus info (speed, width, type) within the ixgbe_hw structure
1006  **/
1007 void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw, u16 link_status)
1008 {
1009 	struct ixgbe_mac_info *mac = &hw->mac;
1010 
1011 	if (hw->bus.type == ixgbe_bus_type_unknown)
1012 		hw->bus.type = ixgbe_bus_type_pci_express;
1013 
1014 	switch (link_status & IXGBE_PCI_LINK_WIDTH) {
1015 	case IXGBE_PCI_LINK_WIDTH_1:
1016 		hw->bus.width = ixgbe_bus_width_pcie_x1;
1017 		break;
1018 	case IXGBE_PCI_LINK_WIDTH_2:
1019 		hw->bus.width = ixgbe_bus_width_pcie_x2;
1020 		break;
1021 	case IXGBE_PCI_LINK_WIDTH_4:
1022 		hw->bus.width = ixgbe_bus_width_pcie_x4;
1023 		break;
1024 	case IXGBE_PCI_LINK_WIDTH_8:
1025 		hw->bus.width = ixgbe_bus_width_pcie_x8;
1026 		break;
1027 	default:
1028 		hw->bus.width = ixgbe_bus_width_unknown;
1029 		break;
1030 	}
1031 
1032 	switch (link_status & IXGBE_PCI_LINK_SPEED) {
1033 	case IXGBE_PCI_LINK_SPEED_2500:
1034 		hw->bus.speed = ixgbe_bus_speed_2500;
1035 		break;
1036 	case IXGBE_PCI_LINK_SPEED_5000:
1037 		hw->bus.speed = ixgbe_bus_speed_5000;
1038 		break;
1039 	case IXGBE_PCI_LINK_SPEED_8000:
1040 		hw->bus.speed = ixgbe_bus_speed_8000;
1041 		break;
1042 	default:
1043 		hw->bus.speed = ixgbe_bus_speed_unknown;
1044 		break;
1045 	}
1046 
1047 	mac->ops.set_lan_id(hw);
1048 }
1049 
1050 /**
1051  *  ixgbe_get_bus_info_generic - Generic set PCI bus info
1052  *  @hw: pointer to hardware structure
1053  *
1054  *  Gets the PCI bus info (speed, width, type) then calls helper function to
1055  *  store this data within the ixgbe_hw structure.
1056  **/
1057 s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
1058 {
1059 	u16 link_status;
1060 
1061 	DEBUGFUNC("ixgbe_get_bus_info_generic");
1062 
1063 	/* Get the negotiated link width and speed from PCI config space */
1064 	link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);
1065 
1066 	ixgbe_set_pci_config_data_generic(hw, link_status);
1067 
1068 	return IXGBE_SUCCESS;
1069 }
1070 
1071 /**
1072  *  ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
1073  *  @hw: pointer to the HW structure
1074  *
1075  *  Determines the LAN function id by reading memory-mapped registers and swaps
1076  *  the port value if requested, and set MAC instance for devices that share
1077  *  CS4227.
1078  **/
1079 void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
1080 {
1081 	struct ixgbe_bus_info *bus = &hw->bus;
1082 	u32 reg;
1083 	u16 ee_ctrl_4;
1084 
1085 	DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie");
1086 
1087 	reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
1088 	bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
1089 	bus->lan_id = (u8)bus->func;
1090 
1091 	/* check for a port swap */
1092 	reg = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw));
1093 	if (reg & IXGBE_FACTPS_LFS)
1094 		bus->func ^= 0x1;
1095 
1096 	/* Get MAC instance from EEPROM for configuring CS4227 */
1097 	if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP) {
1098 		hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_4, &ee_ctrl_4);
1099 		bus->instance_id = (ee_ctrl_4 & IXGBE_EE_CTRL_4_INST_ID) >>
1100 				   IXGBE_EE_CTRL_4_INST_ID_SHIFT;
1101 	}
1102 }
1103 
1104 /**
1105  *  ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
1106  *  @hw: pointer to hardware structure
1107  *
1108  *  Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
1109  *  disables transmit and receive units. The adapter_stopped flag is used by
1110  *  the shared code and drivers to determine if the adapter is in a stopped
1111  *  state and should not touch the hardware.
1112  **/
1113 s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
1114 {
1115 	u32 reg_val;
1116 	u16 i;
1117 
1118 	DEBUGFUNC("ixgbe_stop_adapter_generic");
1119 
1120 	/*
1121 	 * Set the adapter_stopped flag so other driver functions stop touching
1122 	 * the hardware
1123 	 */
1124 	hw->adapter_stopped = TRUE;
1125 
1126 	/* Disable the receive unit */
1127 	ixgbe_disable_rx(hw);
1128 
1129 	/* Clear interrupt mask to stop interrupts from being generated */
1130 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
1131 
1132 	/* Clear any pending interrupts, flush previous writes */
1133 	IXGBE_READ_REG(hw, IXGBE_EICR);
1134 
1135 	/* Disable the transmit unit.  Each queue must be disabled. */
1136 	for (i = 0; i < hw->mac.max_tx_queues; i++)
1137 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH);
1138 
1139 	/* Disable the receive unit by stopping each queue */
1140 	for (i = 0; i < hw->mac.max_rx_queues; i++) {
1141 		reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
1142 		reg_val &= ~IXGBE_RXDCTL_ENABLE;
1143 		reg_val |= IXGBE_RXDCTL_SWFLSH;
1144 		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
1145 	}
1146 
1147 	/* flush all queues disables */
1148 	IXGBE_WRITE_FLUSH(hw);
1149 	msec_delay(2);
1150 
1151 	/*
1152 	 * Prevent the PCI-E bus from hanging by disabling PCI-E master
1153 	 * access and verify no pending requests
1154 	 */
1155 	return ixgbe_disable_pcie_master(hw);
1156 }
1157 
1158 /**
1159  *  ixgbe_init_led_link_act_generic - Store the LED index link/activity.
1160  *  @hw: pointer to hardware structure
1161  *
1162  *  Store the index for the link active LED. This will be used to support
1163  *  blinking the LED.
1164  **/
1165 s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw)
1166 {
1167 	struct ixgbe_mac_info *mac = &hw->mac;
1168 	u32 led_reg, led_mode;
1169 	u8 i;
1170 
1171 	led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1172 
1173 	/* Get LED link active from the LEDCTL register */
1174 	for (i = 0; i < 4; i++) {
1175 		led_mode = led_reg >> IXGBE_LED_MODE_SHIFT(i);
1176 
1177 		if ((led_mode & IXGBE_LED_MODE_MASK_BASE) ==
1178 		     IXGBE_LED_LINK_ACTIVE) {
1179 			mac->led_link_act = i;
1180 			return IXGBE_SUCCESS;
1181 		}
1182 	}
1183 
1184 	/*
1185 	 * If LEDCTL register does not have the LED link active set, then use
1186 	 * known MAC defaults.
1187 	 */
1188 	switch (hw->mac.type) {
1189 	case ixgbe_mac_X550EM_a:
1190 	case ixgbe_mac_X550EM_x:
1191 		mac->led_link_act = 1;
1192 		break;
1193 	default:
1194 		mac->led_link_act = 2;
1195 	}
1196 	return IXGBE_SUCCESS;
1197 }
1198 
1199 /**
1200  *  ixgbe_led_on_generic - Turns on the software controllable LEDs.
1201  *  @hw: pointer to hardware structure
1202  *  @index: led number to turn on
1203  **/
1204 s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
1205 {
1206 	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1207 
1208 	DEBUGFUNC("ixgbe_led_on_generic");
1209 
1210 	if (index > 3)
1211 		return IXGBE_ERR_PARAM;
1212 
1213 	/* To turn on the LED, set mode to ON. */
1214 	led_reg &= ~IXGBE_LED_MODE_MASK(index);
1215 	led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
1216 	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
1217 	IXGBE_WRITE_FLUSH(hw);
1218 
1219 	return IXGBE_SUCCESS;
1220 }
1221 
1222 /**
1223  *  ixgbe_led_off_generic - Turns off the software controllable LEDs.
1224  *  @hw: pointer to hardware structure
1225  *  @index: led number to turn off
1226  **/
1227 s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
1228 {
1229 	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1230 
1231 	DEBUGFUNC("ixgbe_led_off_generic");
1232 
1233 	if (index > 3)
1234 		return IXGBE_ERR_PARAM;
1235 
1236 	/* To turn off the LED, set mode to OFF. */
1237 	led_reg &= ~IXGBE_LED_MODE_MASK(index);
1238 	led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
1239 	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
1240 	IXGBE_WRITE_FLUSH(hw);
1241 
1242 	return IXGBE_SUCCESS;
1243 }
1244 
1245 /**
1246  *  ixgbe_init_eeprom_params_generic - Initialize EEPROM params
1247  *  @hw: pointer to hardware structure
1248  *
1249  *  Initializes the EEPROM parameters ixgbe_eeprom_info within the
1250  *  ixgbe_hw struct in order to set up EEPROM access.
1251  **/
1252 s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
1253 {
1254 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
1255 	u32 eec;
1256 	u16 eeprom_size;
1257 
1258 	DEBUGFUNC("ixgbe_init_eeprom_params_generic");
1259 
1260 	if (eeprom->type == ixgbe_eeprom_uninitialized) {
1261 		eeprom->type = ixgbe_eeprom_none;
1262 		/* Set default semaphore delay to 10ms which is a well
1263 		 * tested value */
1264 		eeprom->semaphore_delay = 10;
1265 		/* Clear EEPROM page size, it will be initialized as needed */
1266 		eeprom->word_page_size = 0;
1267 
1268 		/*
1269 		 * Check for EEPROM present first.
1270 		 * If not present leave as none
1271 		 */
1272 		eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
1273 		if (eec & IXGBE_EEC_PRES) {
1274 			eeprom->type = ixgbe_eeprom_spi;
1275 
1276 			/*
1277 			 * SPI EEPROM is assumed here.  This code would need to
1278 			 * change if a future EEPROM is not SPI.
1279 			 */
1280 			eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
1281 					    IXGBE_EEC_SIZE_SHIFT);
1282 			eeprom->word_size = 1 << (eeprom_size +
1283 					     IXGBE_EEPROM_WORD_SIZE_SHIFT);
1284 		}
1285 
1286 		if (eec & IXGBE_EEC_ADDR_SIZE)
1287 			eeprom->address_bits = 16;
1288 		else
1289 			eeprom->address_bits = 8;
1290 		DEBUGOUT3("Eeprom params: type = %d, size = %d, address bits: "
1291 			  "%d\n", eeprom->type, eeprom->word_size,
1292 			  eeprom->address_bits);
1293 	}
1294 
1295 	return IXGBE_SUCCESS;
1296 }
1297 
1298 /**
1299  *  ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang
1300  *  @hw: pointer to hardware structure
1301  *  @offset: offset within the EEPROM to write
1302  *  @words: number of word(s)
1303  *  @data: 16 bit word(s) to write to EEPROM
1304  *
1305  *  Reads 16 bit word(s) from EEPROM through bit-bang method
1306  **/
1307 s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1308 					       u16 words, u16 *data)
1309 {
1310 	s32 status = IXGBE_SUCCESS;
1311 	u16 i, count;
1312 
1313 	DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang_generic");
1314 
1315 	hw->eeprom.ops.init_params(hw);
1316 
1317 	if (words == 0) {
1318 		status = IXGBE_ERR_INVALID_ARGUMENT;
1319 		goto out;
1320 	}
1321 
1322 	if (offset + words > hw->eeprom.word_size) {
1323 		status = IXGBE_ERR_EEPROM;
1324 		goto out;
1325 	}
1326 
1327 	/*
1328 	 * The EEPROM page size cannot be queried from the chip. We do lazy
1329 	 * initialization. It is worth to do that when we write large buffer.
1330 	 */
1331 	if ((hw->eeprom.word_page_size == 0) &&
1332 	    (words > IXGBE_EEPROM_PAGE_SIZE_MAX))
1333 		ixgbe_detect_eeprom_page_size_generic(hw, offset);
1334 
1335 	/*
1336 	 * We cannot hold synchronization semaphores for too long
1337 	 * to avoid other entity starvation. However it is more efficient
1338 	 * to read in bursts than synchronizing access for each word.
1339 	 */
1340 	for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
1341 		count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
1342 			IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
1343 		status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i,
1344 							    count, &data[i]);
1345 
1346 		if (status != IXGBE_SUCCESS)
1347 			break;
1348 	}
1349 
1350 out:
1351 	return status;
1352 }
1353 
1354 /**
1355  *  ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM
1356  *  @hw: pointer to hardware structure
1357  *  @offset: offset within the EEPROM to be written to
1358  *  @words: number of word(s)
1359  *  @data: 16 bit word(s) to be written to the EEPROM
1360  *
1361  *  If ixgbe_eeprom_update_checksum is not called after this function, the
1362  *  EEPROM will most likely contain an invalid checksum.
1363  **/
1364 static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
1365 					      u16 words, u16 *data)
1366 {
1367 	s32 status;
1368 	u16 word;
1369 	u16 page_size;
1370 	u16 i;
1371 	u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;
1372 
1373 	DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang");
1374 
1375 	/* Prepare the EEPROM for writing  */
1376 	status = ixgbe_acquire_eeprom(hw);
1377 
1378 	if (status == IXGBE_SUCCESS) {
1379 		if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
1380 			ixgbe_release_eeprom(hw);
1381 			status = IXGBE_ERR_EEPROM;
1382 		}
1383 	}
1384 
1385 	if (status == IXGBE_SUCCESS) {
1386 		for (i = 0; i < words; i++) {
1387 			ixgbe_standby_eeprom(hw);
1388 
1389 			/*  Send the WRITE ENABLE command (8 bit opcode )  */
1390 			ixgbe_shift_out_eeprom_bits(hw,
1391 						   IXGBE_EEPROM_WREN_OPCODE_SPI,
1392 						   IXGBE_EEPROM_OPCODE_BITS);
1393 
1394 			ixgbe_standby_eeprom(hw);
1395 
1396 			/*
1397 			 * Some SPI eeproms use the 8th address bit embedded
1398 			 * in the opcode
1399 			 */
1400 			if ((hw->eeprom.address_bits == 8) &&
1401 			    ((offset + i) >= 128))
1402 				write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
1403 
1404 			/* Send the Write command (8-bit opcode + addr) */
1405 			ixgbe_shift_out_eeprom_bits(hw, write_opcode,
1406 						    IXGBE_EEPROM_OPCODE_BITS);
1407 			ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
1408 						    hw->eeprom.address_bits);
1409 
1410 			page_size = hw->eeprom.word_page_size;
1411 
1412 			/* Send the data in burst via SPI*/
1413 			do {
1414 				word = data[i];
1415 				word = (word >> 8) | (word << 8);
1416 				ixgbe_shift_out_eeprom_bits(hw, word, 16);
1417 
1418 				if (page_size == 0)
1419 					break;
1420 
1421 				/* do not wrap around page */
1422 				if (((offset + i) & (page_size - 1)) ==
1423 				    (page_size - 1))
1424 					break;
1425 			} while (++i < words);
1426 
1427 			ixgbe_standby_eeprom(hw);
1428 			msec_delay(10);
1429 		}
1430 		/* Done with writing - release the EEPROM */
1431 		ixgbe_release_eeprom(hw);
1432 	}
1433 
1434 	return status;
1435 }
1436 
1437 /**
1438  *  ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
1439  *  @hw: pointer to hardware structure
1440  *  @offset: offset within the EEPROM to be written to
1441  *  @data: 16 bit word to be written to the EEPROM
1442  *
1443  *  If ixgbe_eeprom_update_checksum is not called after this function, the
1444  *  EEPROM will most likely contain an invalid checksum.
1445  **/
1446 s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
1447 {
1448 	s32 status;
1449 
1450 	DEBUGFUNC("ixgbe_write_eeprom_generic");
1451 
1452 	hw->eeprom.ops.init_params(hw);
1453 
1454 	if (offset >= hw->eeprom.word_size) {
1455 		status = IXGBE_ERR_EEPROM;
1456 		goto out;
1457 	}
1458 
1459 	status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
1460 
1461 out:
1462 	return status;
1463 }
1464 
1465 /**
1466  *  ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang
1467  *  @hw: pointer to hardware structure
1468  *  @offset: offset within the EEPROM to be read
1469  *  @data: read 16 bit words(s) from EEPROM
1470  *  @words: number of word(s)
1471  *
1472  *  Reads 16 bit word(s) from EEPROM through bit-bang method
1473  **/
1474 s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1475 					      u16 words, u16 *data)
1476 {
1477 	s32 status = IXGBE_SUCCESS;
1478 	u16 i, count;
1479 
1480 	DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang_generic");
1481 
1482 	hw->eeprom.ops.init_params(hw);
1483 
1484 	if (words == 0) {
1485 		status = IXGBE_ERR_INVALID_ARGUMENT;
1486 		goto out;
1487 	}
1488 
1489 	if (offset + words > hw->eeprom.word_size) {
1490 		status = IXGBE_ERR_EEPROM;
1491 		goto out;
1492 	}
1493 
1494 	/*
1495 	 * We cannot hold synchronization semaphores for too long
1496 	 * to avoid other entity starvation. However it is more efficient
1497 	 * to read in bursts than synchronizing access for each word.
1498 	 */
1499 	for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
1500 		count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
1501 			IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
1502 
1503 		status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i,
1504 							   count, &data[i]);
1505 
1506 		if (status != IXGBE_SUCCESS)
1507 			break;
1508 	}
1509 
1510 out:
1511 	return status;
1512 }
1513 
1514 /**
1515  *  ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang
1516  *  @hw: pointer to hardware structure
1517  *  @offset: offset within the EEPROM to be read
1518  *  @words: number of word(s)
1519  *  @data: read 16 bit word(s) from EEPROM
1520  *
1521  *  Reads 16 bit word(s) from EEPROM through bit-bang method
1522  **/
1523 static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
1524 					     u16 words, u16 *data)
1525 {
1526 	s32 status;
1527 	u16 word_in;
1528 	u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
1529 	u16 i;
1530 
1531 	DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang");
1532 
1533 	/* Prepare the EEPROM for reading  */
1534 	status = ixgbe_acquire_eeprom(hw);
1535 
1536 	if (status == IXGBE_SUCCESS) {
1537 		if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
1538 			ixgbe_release_eeprom(hw);
1539 			status = IXGBE_ERR_EEPROM;
1540 		}
1541 	}
1542 
1543 	if (status == IXGBE_SUCCESS) {
1544 		for (i = 0; i < words; i++) {
1545 			ixgbe_standby_eeprom(hw);
1546 			/*
1547 			 * Some SPI eeproms use the 8th address bit embedded
1548 			 * in the opcode
1549 			 */
1550 			if ((hw->eeprom.address_bits == 8) &&
1551 			    ((offset + i) >= 128))
1552 				read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
1553 
1554 			/* Send the READ command (opcode + addr) */
1555 			ixgbe_shift_out_eeprom_bits(hw, read_opcode,
1556 						    IXGBE_EEPROM_OPCODE_BITS);
1557 			ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
1558 						    hw->eeprom.address_bits);
1559 
1560 			/* Read the data. */
1561 			word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
1562 			data[i] = (word_in >> 8) | (word_in << 8);
1563 		}
1564 
1565 		/* End this read operation */
1566 		ixgbe_release_eeprom(hw);
1567 	}
1568 
1569 	return status;
1570 }
1571 
1572 /**
1573  *  ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
1574  *  @hw: pointer to hardware structure
1575  *  @offset: offset within the EEPROM to be read
1576  *  @data: read 16 bit value from EEPROM
1577  *
1578  *  Reads 16 bit value from EEPROM through bit-bang method
1579  **/
1580 s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1581 				       u16 *data)
1582 {
1583 	s32 status;
1584 
1585 	DEBUGFUNC("ixgbe_read_eeprom_bit_bang_generic");
1586 
1587 	hw->eeprom.ops.init_params(hw);
1588 
1589 	if (offset >= hw->eeprom.word_size) {
1590 		status = IXGBE_ERR_EEPROM;
1591 		goto out;
1592 	}
1593 
1594 	status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1595 
1596 out:
1597 	return status;
1598 }
1599 
1600 /**
1601  *  ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD
1602  *  @hw: pointer to hardware structure
1603  *  @offset: offset of word in the EEPROM to read
1604  *  @words: number of word(s)
1605  *  @data: 16 bit word(s) from the EEPROM
1606  *
1607  *  Reads a 16 bit word(s) from the EEPROM using the EERD register.
1608  **/
1609 s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1610 				   u16 words, u16 *data)
1611 {
1612 	u32 eerd;
1613 	s32 status = IXGBE_SUCCESS;
1614 	u32 i;
1615 
1616 	DEBUGFUNC("ixgbe_read_eerd_buffer_generic");
1617 
1618 	hw->eeprom.ops.init_params(hw);
1619 
1620 	if (words == 0) {
1621 		status = IXGBE_ERR_INVALID_ARGUMENT;
1622 		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
1623 		goto out;
1624 	}
1625 
1626 	if (offset >= hw->eeprom.word_size) {
1627 		status = IXGBE_ERR_EEPROM;
1628 		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
1629 		goto out;
1630 	}
1631 
1632 	for (i = 0; i < words; i++) {
1633 		eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1634 		       IXGBE_EEPROM_RW_REG_START;
1635 
1636 		IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
1637 		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);
1638 
1639 		if (status == IXGBE_SUCCESS) {
1640 			data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
1641 				   IXGBE_EEPROM_RW_REG_DATA);
1642 		} else {
1643 			DEBUGOUT("Eeprom read timed out\n");
1644 			goto out;
1645 		}
1646 	}
1647 out:
1648 	return status;
1649 }
1650 
1651 /**
1652  *  ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size
1653  *  @hw: pointer to hardware structure
1654  *  @offset: offset within the EEPROM to be used as a scratch pad
1655  *
1656  *  Discover EEPROM page size by writing marching data at given offset.
1657  *  This function is called only when we are writing a new large buffer
1658  *  at given offset so the data would be overwritten anyway.
1659  **/
1660 static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
1661 						 u16 offset)
1662 {
1663 	u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
1664 	s32 status = IXGBE_SUCCESS;
1665 	u16 i;
1666 
1667 	DEBUGFUNC("ixgbe_detect_eeprom_page_size_generic");
1668 
1669 	for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
1670 		data[i] = i;
1671 
1672 	hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX;
1673 	status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset,
1674 					     IXGBE_EEPROM_PAGE_SIZE_MAX, data);
1675 	hw->eeprom.word_page_size = 0;
1676 	if (status != IXGBE_SUCCESS)
1677 		goto out;
1678 
1679 	status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1680 	if (status != IXGBE_SUCCESS)
1681 		goto out;
1682 
1683 	/*
1684 	 * When writing in burst more than the actual page size
1685 	 * EEPROM address wraps around current page.
1686 	 */
1687 	hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];
1688 
1689 	DEBUGOUT1("Detected EEPROM page size = %d words.",
1690 		  hw->eeprom.word_page_size);
1691 out:
1692 	return status;
1693 }
1694 
1695 /**
1696  *  ixgbe_read_eerd_generic - Read EEPROM word using EERD
1697  *  @hw: pointer to hardware structure
1698  *  @offset: offset of  word in the EEPROM to read
1699  *  @data: word read from the EEPROM
1700  *
1701  *  Reads a 16 bit word from the EEPROM using the EERD register.
1702  **/
1703 s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
1704 {
1705 	return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
1706 }
1707 
1708 /**
1709  *  ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR
1710  *  @hw: pointer to hardware structure
1711  *  @offset: offset of  word in the EEPROM to write
1712  *  @words: number of word(s)
1713  *  @data: word(s) write to the EEPROM
1714  *
1715  *  Write a 16 bit word(s) to the EEPROM using the EEWR register.
1716  **/
1717 s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1718 				    u16 words, u16 *data)
1719 {
1720 	u32 eewr;
1721 	s32 status = IXGBE_SUCCESS;
1722 	u16 i;
1723 
1724 	DEBUGFUNC("ixgbe_write_eewr_generic");
1725 
1726 	hw->eeprom.ops.init_params(hw);
1727 
1728 	if (words == 0) {
1729 		status = IXGBE_ERR_INVALID_ARGUMENT;
1730 		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
1731 		goto out;
1732 	}
1733 
1734 	if (offset >= hw->eeprom.word_size) {
1735 		status = IXGBE_ERR_EEPROM;
1736 		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
1737 		goto out;
1738 	}
1739 
1740 	for (i = 0; i < words; i++) {
1741 		eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1742 			(data[i] << IXGBE_EEPROM_RW_REG_DATA) |
1743 			IXGBE_EEPROM_RW_REG_START;
1744 
1745 		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1746 		if (status != IXGBE_SUCCESS) {
1747 			DEBUGOUT("Eeprom write EEWR timed out\n");
1748 			goto out;
1749 		}
1750 
1751 		IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
1752 
1753 		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1754 		if (status != IXGBE_SUCCESS) {
1755 			DEBUGOUT("Eeprom write EEWR timed out\n");
1756 			goto out;
1757 		}
1758 	}
1759 
1760 out:
1761 	return status;
1762 }
1763 
1764 /**
1765  *  ixgbe_write_eewr_generic - Write EEPROM word using EEWR
1766  *  @hw: pointer to hardware structure
1767  *  @offset: offset of  word in the EEPROM to write
1768  *  @data: word write to the EEPROM
1769  *
1770  *  Write a 16 bit word to the EEPROM using the EEWR register.
1771  **/
1772 s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
1773 {
1774 	return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data);
1775 }
1776 
1777 /**
1778  *  ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
1779  *  @hw: pointer to hardware structure
1780  *  @ee_reg: EEPROM flag for polling
1781  *
1782  *  Polls the status bit (bit 1) of the EERD or EEWR to determine when the
1783  *  read or write is done respectively.
1784  **/
1785 s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
1786 {
1787 	u32 i;
1788 	u32 reg;
1789 	s32 status = IXGBE_ERR_EEPROM;
1790 
1791 	DEBUGFUNC("ixgbe_poll_eerd_eewr_done");
1792 
1793 	for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
1794 		if (ee_reg == IXGBE_NVM_POLL_READ)
1795 			reg = IXGBE_READ_REG(hw, IXGBE_EERD);
1796 		else
1797 			reg = IXGBE_READ_REG(hw, IXGBE_EEWR);
1798 
1799 		if (reg & IXGBE_EEPROM_RW_REG_DONE) {
1800 			status = IXGBE_SUCCESS;
1801 			break;
1802 		}
1803 		usec_delay(5);
1804 	}
1805 
1806 	if (i == IXGBE_EERD_EEWR_ATTEMPTS)
1807 		ERROR_REPORT1(IXGBE_ERROR_POLLING,
1808 			     "EEPROM read/write done polling timed out");
1809 
1810 	return status;
1811 }
1812 
1813 /**
1814  *  ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
1815  *  @hw: pointer to hardware structure
1816  *
1817  *  Prepares EEPROM for access using bit-bang method. This function should
1818  *  be called before issuing a command to the EEPROM.
1819  **/
1820 static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
1821 {
1822 	s32 status = IXGBE_SUCCESS;
1823 	u32 eec;
1824 	u32 i;
1825 
1826 	DEBUGFUNC("ixgbe_acquire_eeprom");
1827 
1828 	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)
1829 	    != IXGBE_SUCCESS)
1830 		status = IXGBE_ERR_SWFW_SYNC;
1831 
1832 	if (status == IXGBE_SUCCESS) {
1833 		eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
1834 
1835 		/* Request EEPROM Access */
1836 		eec |= IXGBE_EEC_REQ;
1837 		IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
1838 
1839 		for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
1840 			eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
1841 			if (eec & IXGBE_EEC_GNT)
1842 				break;
1843 			usec_delay(5);
1844 		}
1845 
1846 		/* Release if grant not acquired */
1847 		if (!(eec & IXGBE_EEC_GNT)) {
1848 			eec &= ~IXGBE_EEC_REQ;
1849 			IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
1850 			DEBUGOUT("Could not acquire EEPROM grant\n");
1851 
1852 			hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1853 			status = IXGBE_ERR_EEPROM;
1854 		}
1855 
1856 		/* Setup EEPROM for Read/Write */
1857 		if (status == IXGBE_SUCCESS) {
1858 			/* Clear CS and SK */
1859 			eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
1860 			IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
1861 			IXGBE_WRITE_FLUSH(hw);
1862 			usec_delay(1);
1863 		}
1864 	}
1865 	return status;
1866 }
1867 
1868 /**
1869  *  ixgbe_get_eeprom_semaphore - Get hardware semaphore
1870  *  @hw: pointer to hardware structure
1871  *
1872  *  Sets the hardware semaphores so EEPROM access can occur for bit-bang method
1873  **/
1874 static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
1875 {
1876 	s32 status = IXGBE_ERR_EEPROM;
1877 	u32 timeout = 2000;
1878 	u32 i;
1879 	u32 swsm;
1880 
1881 	DEBUGFUNC("ixgbe_get_eeprom_semaphore");
1882 
1883 
1884 	/* Get SMBI software semaphore between device drivers first */
1885 	for (i = 0; i < timeout; i++) {
1886 		/*
1887 		 * If the SMBI bit is 0 when we read it, then the bit will be
1888 		 * set and we have the semaphore
1889 		 */
1890 		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
1891 		if (!(swsm & IXGBE_SWSM_SMBI)) {
1892 			status = IXGBE_SUCCESS;
1893 			break;
1894 		}
1895 		usec_delay(50);
1896 	}
1897 
1898 	if (i == timeout) {
1899 		DEBUGOUT("Driver can't access the Eeprom - SMBI Semaphore "
1900 			 "not granted.\n");
1901 		/*
1902 		 * this release is particularly important because our attempts
1903 		 * above to get the semaphore may have succeeded, and if there
1904 		 * was a timeout, we should unconditionally clear the semaphore
1905 		 * bits to free the driver to make progress
1906 		 */
1907 		ixgbe_release_eeprom_semaphore(hw);
1908 
1909 		usec_delay(50);
1910 		/*
1911 		 * one last try
1912 		 * If the SMBI bit is 0 when we read it, then the bit will be
1913 		 * set and we have the semaphore
1914 		 */
1915 		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
1916 		if (!(swsm & IXGBE_SWSM_SMBI))
1917 			status = IXGBE_SUCCESS;
1918 	}
1919 
1920 	/* Now get the semaphore between SW/FW through the SWESMBI bit */
1921 	if (status == IXGBE_SUCCESS) {
1922 		for (i = 0; i < timeout; i++) {
1923 			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
1924 
1925 			/* Set the SW EEPROM semaphore bit to request access */
1926 			swsm |= IXGBE_SWSM_SWESMBI;
1927 			IXGBE_WRITE_REG(hw, IXGBE_SWSM_BY_MAC(hw), swsm);
1928 
1929 			/*
1930 			 * If we set the bit successfully then we got the
1931 			 * semaphore.
1932 			 */
1933 			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
1934 			if (swsm & IXGBE_SWSM_SWESMBI)
1935 				break;
1936 
1937 			usec_delay(50);
1938 		}
1939 
1940 		/*
1941 		 * Release semaphores and return error if SW EEPROM semaphore
1942 		 * was not granted because we don't have access to the EEPROM
1943 		 */
1944 		if (i >= timeout) {
1945 			ERROR_REPORT1(IXGBE_ERROR_POLLING,
1946 			    "SWESMBI Software EEPROM semaphore not granted.\n");
1947 			ixgbe_release_eeprom_semaphore(hw);
1948 			status = IXGBE_ERR_EEPROM;
1949 		}
1950 	} else {
1951 		ERROR_REPORT1(IXGBE_ERROR_POLLING,
1952 			     "Software semaphore SMBI between device drivers "
1953 			     "not granted.\n");
1954 	}
1955 
1956 	return status;
1957 }
1958 
1959 /**
1960  *  ixgbe_release_eeprom_semaphore - Release hardware semaphore
1961  *  @hw: pointer to hardware structure
1962  *
1963  *  This function clears hardware semaphore bits.
1964  **/
1965 static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
1966 {
1967 	u32 swsm;
1968 
1969 	DEBUGFUNC("ixgbe_release_eeprom_semaphore");
1970 
1971 	swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1972 
1973 	/* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
1974 	swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
1975 	IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
1976 	IXGBE_WRITE_FLUSH(hw);
1977 }
1978 
/**
 *  ixgbe_ready_eeprom - Polls for EEPROM ready
 *  @hw: pointer to hardware structure
 *
 *  Returns IXGBE_SUCCESS when the SPI EEPROM reports ready, or
 *  IXGBE_ERR_EEPROM if it stays busy past the retry budget.
 **/
static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_SUCCESS;
	u16 i;
	u8 spi_stat_reg;

	DEBUGFUNC("ixgbe_ready_eeprom");

	/*
	 * Read "Status Register" repeatedly until the LSB is cleared.  The
	 * EEPROM will signal that the command has been completed by clearing
	 * bit 0 of the internal status register.  If it's not cleared within
	 * 5 milliseconds, then error out.
	 */
	/* i advances by 5 per pass so it tracks the microseconds slept below */
	for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
		/* Issue the Read Status Register (RDSR) opcode ... */
		ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
					    IXGBE_EEPROM_OPCODE_BITS);
		/* ... then clock in the 8-bit status response */
		spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
		if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
			break;

		usec_delay(5);
		/* Toggle CS to end this RDSR before the next poll */
		ixgbe_standby_eeprom(hw);
	}

	/*
	 * On some parts, SPI write time could vary from 0-20mSec on 3.3V
	 * devices (and only 0-5mSec on 5V devices)
	 */
	if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
		DEBUGOUT("SPI EEPROM Status error\n");
		status = IXGBE_ERR_EEPROM;
	}

	return status;
}
2019 
2020 /**
2021  *  ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
2022  *  @hw: pointer to hardware structure
2023  **/
2024 static void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
2025 {
2026 	u32 eec;
2027 
2028 	DEBUGFUNC("ixgbe_standby_eeprom");
2029 
2030 	eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
2031 
2032 	/* Toggle CS to flush commands */
2033 	eec |= IXGBE_EEC_CS;
2034 	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
2035 	IXGBE_WRITE_FLUSH(hw);
2036 	usec_delay(1);
2037 	eec &= ~IXGBE_EEC_CS;
2038 	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
2039 	IXGBE_WRITE_FLUSH(hw);
2040 	usec_delay(1);
2041 }
2042 
/**
 *  ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
 *  @hw: pointer to hardware structure
 *  @data: data to send to the EEPROM
 *  @count: number of bits to shift out
 *
 *  Bit-bangs 'count' bits of 'data', most significant bit first, onto the
 *  EEPROM's DI line, clocking each bit with an SK pulse.  DI is driven
 *  low before returning.
 **/
static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
					u16 count)
{
	u32 eec;
	u32 mask;
	u32 i;

	DEBUGFUNC("ixgbe_shift_out_eeprom_bits");

	eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));

	/*
	 * Mask is used to shift "count" bits of "data" out to the EEPROM
	 * one bit at a time.  Determine the starting bit based on count
	 */
	mask = 0x01 << (count - 1);

	for (i = 0; i < count; i++) {
		/*
		 * A "1" is shifted out to the EEPROM by setting bit "DI" to a
		 * "1", and then raising and then lowering the clock (the SK
		 * bit controls the clock input to the EEPROM).  A "0" is
		 * shifted out to the EEPROM by setting "DI" to "0" and then
		 * raising and then lowering the clock.
		 */
		if (data & mask)
			eec |= IXGBE_EEC_DI;
		else
			eec &= ~IXGBE_EEC_DI;

		IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
		IXGBE_WRITE_FLUSH(hw);

		/* Let DI settle before pulsing the clock */
		usec_delay(1);

		ixgbe_raise_eeprom_clk(hw, &eec);
		ixgbe_lower_eeprom_clk(hw, &eec);

		/*
		 * Shift mask to signify next bit of data to shift in to the
		 * EEPROM
		 */
		mask = mask >> 1;
	}

	/* We leave the "DI" bit set to "0" when we leave this routine. */
	eec &= ~IXGBE_EEC_DI;
	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
	IXGBE_WRITE_FLUSH(hw);
}
2099 
/**
 *  ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
 *  @hw: pointer to hardware structure
 *  @count: number of bits to shift
 *
 *  Returns the 'count' bits clocked in from the EEPROM's DO line,
 *  most significant bit first.
 **/
static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
{
	u32 eec;
	u32 i;
	u16 data = 0;

	DEBUGFUNC("ixgbe_shift_in_eeprom_bits");

	/*
	 * In order to read a register from the EEPROM, we need to shift
	 * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
	 * the clock input to the EEPROM (setting the SK bit), and then reading
	 * the value of the "DO" bit.  During this "shifting in" process the
	 * "DI" bit should always be clear.
	 */
	eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));

	eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);

	for (i = 0; i < count; i++) {
		/* Make room for the next bit in the LSB position */
		data = data << 1;
		ixgbe_raise_eeprom_clk(hw, &eec);

		/* Re-read EEC to sample DO while the clock is high */
		eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));

		eec &= ~(IXGBE_EEC_DI);
		if (eec & IXGBE_EEC_DO)
			data |= 1;

		ixgbe_lower_eeprom_clk(hw, &eec);
	}

	return data;
}
2139 
2140 /**
2141  *  ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
2142  *  @hw: pointer to hardware structure
2143  *  @eec: EEC register's current value
2144  **/
2145 static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
2146 {
2147 	DEBUGFUNC("ixgbe_raise_eeprom_clk");
2148 
2149 	/*
2150 	 * Raise the clock input to the EEPROM
2151 	 * (setting the SK bit), then delay
2152 	 */
2153 	*eec = *eec | IXGBE_EEC_SK;
2154 	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), *eec);
2155 	IXGBE_WRITE_FLUSH(hw);
2156 	usec_delay(1);
2157 }
2158 
2159 /**
2160  *  ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
2161  *  @hw: pointer to hardware structure
2162  *  @eec: EEC's current value
2163  **/
2164 static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
2165 {
2166 	DEBUGFUNC("ixgbe_lower_eeprom_clk");
2167 
2168 	/*
2169 	 * Lower the clock input to the EEPROM (clearing the SK bit), then
2170 	 * delay
2171 	 */
2172 	*eec = *eec & ~IXGBE_EEC_SK;
2173 	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), *eec);
2174 	IXGBE_WRITE_FLUSH(hw);
2175 	usec_delay(1);
2176 }
2177 
/**
 *  ixgbe_release_eeprom - Release EEPROM, release semaphores
 *  @hw: pointer to hardware structure
 *
 *  Deselects the SPI EEPROM (CS high, SK low), drops the EEC access
 *  request, and releases the SW/FW EEPROM semaphore.
 **/
static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
{
	u32 eec;

	DEBUGFUNC("ixgbe_release_eeprom");

	eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));

	eec |= IXGBE_EEC_CS;  /* Pull CS high */
	eec &= ~IXGBE_EEC_SK; /* Lower SCK */

	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
	IXGBE_WRITE_FLUSH(hw);

	usec_delay(1);

	/* Stop requesting EEPROM access */
	eec &= ~IXGBE_EEC_REQ;
	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);

	/* Hand the EEPROM semaphore back to firmware/other functions */
	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);

	/* Delay before attempt to obtain semaphore again to allow FW access */
	msec_delay(hw->eeprom.semaphore_delay);
}
2207 
/**
 *  ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
 *  @hw: pointer to hardware structure
 *
 *  Sums words 0x0-0x3E plus every section reachable through the pointer
 *  words (except the firmware section), and returns the value that makes
 *  the total equal IXGBE_EEPROM_SUM.
 *
 *  Returns a negative error code on error, or the 16-bit checksum
 **/
s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
{
	u16 i;
	u16 j;
	u16 checksum = 0;
	u16 length = 0;
	u16 pointer = 0;
	u16 word = 0;

	DEBUGFUNC("ixgbe_calc_eeprom_checksum_generic");

	/* Include 0x0-0x3F in the checksum */
	/* (the checksum word itself at 0x3F is excluded by the < bound) */
	for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
		if (hw->eeprom.ops.read(hw, i, &word)) {
			DEBUGOUT("EEPROM read failed\n");
			return IXGBE_ERR_EEPROM;
		}
		checksum += word;
	}

	/* Include all data from pointers except for the fw pointer */
	for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
		if (hw->eeprom.ops.read(hw, i, &pointer)) {
			DEBUGOUT("EEPROM read failed\n");
			return IXGBE_ERR_EEPROM;
		}

		/* If the pointer seems invalid */
		if (pointer == 0xFFFF || pointer == 0)
			continue;

		/* First word of a section holds its length */
		if (hw->eeprom.ops.read(hw, pointer, &length)) {
			DEBUGOUT("EEPROM read failed\n");
			return IXGBE_ERR_EEPROM;
		}

		if (length == 0xFFFF || length == 0)
			continue;

		/*
		 * Section data follows the length word.
		 * NOTE(review): pointer + length can wrap a u16 for a
		 * corrupt image — confirm callers tolerate that.
		 */
		for (j = pointer + 1; j <= pointer + length; j++) {
			if (hw->eeprom.ops.read(hw, j, &word)) {
				DEBUGOUT("EEPROM read failed\n");
				return IXGBE_ERR_EEPROM;
			}
			checksum += word;
		}
	}

	checksum = (u16)IXGBE_EEPROM_SUM - checksum;

	return (s32)checksum;
}
2266 
2267 /**
2268  *  ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
2269  *  @hw: pointer to hardware structure
2270  *  @checksum_val: calculated checksum
2271  *
2272  *  Performs checksum calculation and validates the EEPROM checksum.  If the
2273  *  caller does not need checksum_val, the value can be NULL.
2274  **/
2275 s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
2276 					   u16 *checksum_val)
2277 {
2278 	s32 status;
2279 	u16 checksum;
2280 	u16 read_checksum = 0;
2281 
2282 	DEBUGFUNC("ixgbe_validate_eeprom_checksum_generic");
2283 
2284 	/* Read the first word from the EEPROM. If this times out or fails, do
2285 	 * not continue or we could be in for a very long wait while every
2286 	 * EEPROM read fails
2287 	 */
2288 	status = hw->eeprom.ops.read(hw, 0, &checksum);
2289 	if (status) {
2290 		DEBUGOUT("EEPROM read failed\n");
2291 		return status;
2292 	}
2293 
2294 	status = hw->eeprom.ops.calc_checksum(hw);
2295 	if (status < 0)
2296 		return status;
2297 
2298 	checksum = (u16)(status & 0xffff);
2299 
2300 	status = hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
2301 	if (status) {
2302 		DEBUGOUT("EEPROM read failed\n");
2303 		return status;
2304 	}
2305 
2306 	/* Verify read checksum from EEPROM is the same as
2307 	 * calculated checksum
2308 	 */
2309 	if (read_checksum != checksum)
2310 		status = IXGBE_ERR_EEPROM_CHECKSUM;
2311 
2312 	/* If the user cares, return the calculated checksum */
2313 	if (checksum_val)
2314 		*checksum_val = checksum;
2315 
2316 	return status;
2317 }
2318 
2319 /**
2320  *  ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
2321  *  @hw: pointer to hardware structure
2322  **/
2323 s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
2324 {
2325 	s32 status;
2326 	u16 checksum;
2327 
2328 	DEBUGFUNC("ixgbe_update_eeprom_checksum_generic");
2329 
2330 	/* Read the first word from the EEPROM. If this times out or fails, do
2331 	 * not continue or we could be in for a very long wait while every
2332 	 * EEPROM read fails
2333 	 */
2334 	status = hw->eeprom.ops.read(hw, 0, &checksum);
2335 	if (status) {
2336 		DEBUGOUT("EEPROM read failed\n");
2337 		return status;
2338 	}
2339 
2340 	status = hw->eeprom.ops.calc_checksum(hw);
2341 	if (status < 0)
2342 		return status;
2343 
2344 	checksum = (u16)(status & 0xffff);
2345 
2346 	status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, checksum);
2347 
2348 	return status;
2349 }
2350 
2351 /**
2352  *  ixgbe_validate_mac_addr - Validate MAC address
2353  *  @mac_addr: pointer to MAC address.
2354  *
2355  *  Tests a MAC address to ensure it is a valid Individual Address.
2356  **/
2357 s32 ixgbe_validate_mac_addr(u8 *mac_addr)
2358 {
2359 	s32 status = IXGBE_SUCCESS;
2360 
2361 	DEBUGFUNC("ixgbe_validate_mac_addr");
2362 
2363 	/* Make sure it is not a multicast address */
2364 	if (IXGBE_IS_MULTICAST(mac_addr)) {
2365 		status = IXGBE_ERR_INVALID_MAC_ADDR;
2366 	/* Not a broadcast address */
2367 	} else if (IXGBE_IS_BROADCAST(mac_addr)) {
2368 		status = IXGBE_ERR_INVALID_MAC_ADDR;
2369 	/* Reject the zero address */
2370 	} else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
2371 		   mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
2372 		status = IXGBE_ERR_INVALID_MAC_ADDR;
2373 	}
2374 	return status;
2375 }
2376 
2377 /**
2378  *  ixgbe_set_rar_generic - Set Rx address register
2379  *  @hw: pointer to hardware structure
2380  *  @index: Receive address register to write
2381  *  @addr: Address to put into receive address register
2382  *  @vmdq: VMDq "set" or "pool" index
2383  *  @enable_addr: set flag that address is active
2384  *
2385  *  Puts an ethernet address into a receive address register.
2386  **/
2387 s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
2388 			  u32 enable_addr)
2389 {
2390 	u32 rar_low, rar_high;
2391 	u32 rar_entries = hw->mac.num_rar_entries;
2392 
2393 	DEBUGFUNC("ixgbe_set_rar_generic");
2394 
2395 	/* Make sure we are using a valid rar index range */
2396 	if (index >= rar_entries) {
2397 		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
2398 			     "RAR index %d is out of range.\n", index);
2399 		return IXGBE_ERR_INVALID_ARGUMENT;
2400 	}
2401 
2402 	/* setup VMDq pool selection before this RAR gets enabled */
2403 	hw->mac.ops.set_vmdq(hw, index, vmdq);
2404 
2405 	/*
2406 	 * HW expects these in little endian so we reverse the byte
2407 	 * order from network order (big endian) to little endian
2408 	 */
2409 	rar_low = ((u32)addr[0] |
2410 		   ((u32)addr[1] << 8) |
2411 		   ((u32)addr[2] << 16) |
2412 		   ((u32)addr[3] << 24));
2413 	/*
2414 	 * Some parts put the VMDq setting in the extra RAH bits,
2415 	 * so save everything except the lower 16 bits that hold part
2416 	 * of the address and the address valid bit.
2417 	 */
2418 	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
2419 	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
2420 	rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
2421 
2422 	if (enable_addr != 0)
2423 		rar_high |= IXGBE_RAH_AV;
2424 
2425 	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
2426 	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
2427 
2428 	return IXGBE_SUCCESS;
2429 }
2430 
2431 /**
2432  *  ixgbe_clear_rar_generic - Remove Rx address register
2433  *  @hw: pointer to hardware structure
2434  *  @index: Receive address register to write
2435  *
2436  *  Clears an ethernet address from a receive address register.
2437  **/
2438 s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
2439 {
2440 	u32 rar_high;
2441 	u32 rar_entries = hw->mac.num_rar_entries;
2442 
2443 	DEBUGFUNC("ixgbe_clear_rar_generic");
2444 
2445 	/* Make sure we are using a valid rar index range */
2446 	if (index >= rar_entries) {
2447 		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
2448 			     "RAR index %d is out of range.\n", index);
2449 		return IXGBE_ERR_INVALID_ARGUMENT;
2450 	}
2451 
2452 	/*
2453 	 * Some parts put the VMDq setting in the extra RAH bits,
2454 	 * so save everything except the lower 16 bits that hold part
2455 	 * of the address and the address valid bit.
2456 	 */
2457 	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
2458 	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
2459 
2460 	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
2461 	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
2462 
2463 	/* clear VMDq pool/queue selection for this RAR */
2464 	hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
2465 
2466 	return IXGBE_SUCCESS;
2467 }
2468 
/**
 *  ixgbe_init_rx_addrs_generic - Initializes receive address filters.
 *  @hw: pointer to hardware structure
 *
 *  Places the MAC address in receive address register 0 and clears the rest
 *  of the receive address registers. Clears the multicast table. Assumes
 *  the receiver is in reset when the routine is called.
 **/
s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
{
	u32 i;
	u32 rar_entries = hw->mac.num_rar_entries;

	DEBUGFUNC("ixgbe_init_rx_addrs_generic");

	/*
	 * If the current mac address is valid, assume it is a software override
	 * to the permanent address.
	 * Otherwise, use the permanent address from the eeprom.
	 */
	if (ixgbe_validate_mac_addr(hw->mac.addr) ==
	    IXGBE_ERR_INVALID_MAC_ADDR) {
		/* Get the MAC address from the RAR0 for later reference */
		hw->mac.ops.get_mac_addr(hw, hw->mac.addr);

		DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
			  hw->mac.addr[0], hw->mac.addr[1],
			  hw->mac.addr[2]);
		DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
			  hw->mac.addr[4], hw->mac.addr[5]);
	} else {
		/* Setup the receive address. */
		DEBUGOUT("Overriding MAC Address in RAR[0]\n");
		DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ",
			  hw->mac.addr[0], hw->mac.addr[1],
			  hw->mac.addr[2]);
		DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
			  hw->mac.addr[4], hw->mac.addr[5]);

		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
	}

	/* clear VMDq pool/queue selection for RAR 0 */
	hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);

	/* Reset the software filter-state accounting */
	hw->addr_ctrl.overflow_promisc = 0;

	/* RAR[0] (the primary address) is the only entry in use */
	hw->addr_ctrl.rar_used_count = 1;

	/* Zero out the other receive addresses. */
	DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1);
	for (i = 1; i < rar_entries; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
	}

	/* Clear the MTA */
	hw->addr_ctrl.mta_in_use = 0;
	/* Program the multicast filter mode; MFE stays off (no entries) */
	IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);

	DEBUGOUT(" Clearing MTA\n");
	for (i = 0; i < hw->mac.mcft_size; i++)
		IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);

	/* Clear the unicast hash table as well */
	ixgbe_init_uta_tables(hw);

	return IXGBE_SUCCESS;
}
2537 
2538 /**
2539  *  ixgbe_add_uc_addr - Adds a secondary unicast address.
2540  *  @hw: pointer to hardware structure
2541  *  @addr: new address
2542  *  @vmdq: VMDq "set" or "pool" index
2543  *
2544  *  Adds it to unused receive address register or goes into promiscuous mode.
2545  **/
2546 void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
2547 {
2548 	u32 rar_entries = hw->mac.num_rar_entries;
2549 	u32 rar;
2550 
2551 	DEBUGFUNC("ixgbe_add_uc_addr");
2552 
2553 	DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
2554 		  addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
2555 
2556 	/*
2557 	 * Place this address in the RAR if there is room,
2558 	 * else put the controller into promiscuous mode
2559 	 */
2560 	if (hw->addr_ctrl.rar_used_count < rar_entries) {
2561 		rar = hw->addr_ctrl.rar_used_count;
2562 		hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
2563 		DEBUGOUT1("Added a secondary address to RAR[%d]\n", rar);
2564 		hw->addr_ctrl.rar_used_count++;
2565 	} else {
2566 		hw->addr_ctrl.overflow_promisc++;
2567 	}
2568 
2569 	DEBUGOUT("ixgbe_add_uc_addr Complete\n");
2570 }
2571 
2572 /**
2573  *  ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
2574  *  @hw: pointer to hardware structure
2575  *  @addr_list: the list of new addresses
2576  *  @addr_count: number of addresses
2577  *  @next: iterator function to walk the address list
2578  *
2579  *  The given list replaces any existing list.  Clears the secondary addrs from
2580  *  receive address registers.  Uses unused receive address registers for the
2581  *  first secondary addresses, and falls back to promiscuous mode as needed.
2582  *
2583  *  Drivers using secondary unicast addresses must set user_set_promisc when
2584  *  manually putting the device into promiscuous mode.
2585  **/
2586 s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
2587 				      u32 addr_count, ixgbe_mc_addr_itr next)
2588 {
2589 	u8 *addr;
2590 	u32 i;
2591 	u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
2592 	u32 uc_addr_in_use;
2593 	u32 fctrl;
2594 	u32 vmdq;
2595 
2596 	DEBUGFUNC("ixgbe_update_uc_addr_list_generic");
2597 
2598 	/*
2599 	 * Clear accounting of old secondary address list,
2600 	 * don't count RAR[0]
2601 	 */
2602 	uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
2603 	hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
2604 	hw->addr_ctrl.overflow_promisc = 0;
2605 
2606 	/* Zero out the other receive addresses */
2607 	DEBUGOUT1("Clearing RAR[1-%d]\n", uc_addr_in_use+1);
2608 	for (i = 0; i < uc_addr_in_use; i++) {
2609 		IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0);
2610 		IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0);
2611 	}
2612 
2613 	/* Add the new addresses */
2614 	for (i = 0; i < addr_count; i++) {
2615 		DEBUGOUT(" Adding the secondary addresses:\n");
2616 		addr = next(hw, &addr_list, &vmdq);
2617 		ixgbe_add_uc_addr(hw, addr, vmdq);
2618 	}
2619 
2620 	if (hw->addr_ctrl.overflow_promisc) {
2621 		/* enable promisc if not already in overflow or set by user */
2622 		if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
2623 			DEBUGOUT(" Entering address overflow promisc mode\n");
2624 			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2625 			fctrl |= IXGBE_FCTRL_UPE;
2626 			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2627 		}
2628 	} else {
2629 		/* only disable if set by overflow, not by user */
2630 		if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
2631 			DEBUGOUT(" Leaving address overflow promisc mode\n");
2632 			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2633 			fctrl &= ~IXGBE_FCTRL_UPE;
2634 			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2635 		}
2636 	}
2637 
2638 	DEBUGOUT("ixgbe_update_uc_addr_list_generic Complete\n");
2639 	return IXGBE_SUCCESS;
2640 }
2641 
2642 /**
2643  *  ixgbe_mta_vector - Determines bit-vector in multicast table to set
2644  *  @hw: pointer to hardware structure
2645  *  @mc_addr: the multicast address
2646  *
2647  *  Extracts the 12 bits, from a multicast address, to determine which
2648  *  bit-vector to set in the multicast table. The hardware uses 12 bits, from
2649  *  incoming rx multicast addresses, to determine the bit-vector to check in
2650  *  the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
2651  *  by the MO field of the MCSTCTRL. The MO field is set during initialization
2652  *  to mc_filter_type.
2653  **/
2654 static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
2655 {
2656 	u32 vector = 0;
2657 
2658 	DEBUGFUNC("ixgbe_mta_vector");
2659 
2660 	switch (hw->mac.mc_filter_type) {
2661 	case 0:   /* use bits [47:36] of the address */
2662 		vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
2663 		break;
2664 	case 1:   /* use bits [46:35] of the address */
2665 		vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
2666 		break;
2667 	case 2:   /* use bits [45:34] of the address */
2668 		vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
2669 		break;
2670 	case 3:   /* use bits [43:32] of the address */
2671 		vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
2672 		break;
2673 	default:  /* Invalid mc_filter_type */
2674 		DEBUGOUT("MC filter type param set incorrectly\n");
2675 		ASSERT(0);
2676 		break;
2677 	}
2678 
2679 	/* vector can only be 12-bits or boundary will be exceeded */
2680 	vector &= 0xFFF;
2681 	return vector;
2682 }
2683 
2684 /**
2685  *  ixgbe_set_mta - Set bit-vector in multicast table
2686  *  @hw: pointer to hardware structure
2687  *  @mc_addr: Multicast address
2688  *
2689  *  Sets the bit-vector in the multicast table.
2690  **/
2691 void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
2692 {
2693 	u32 vector;
2694 	u32 vector_bit;
2695 	u32 vector_reg;
2696 
2697 	DEBUGFUNC("ixgbe_set_mta");
2698 
2699 	hw->addr_ctrl.mta_in_use++;
2700 
2701 	vector = ixgbe_mta_vector(hw, mc_addr);
2702 	DEBUGOUT1(" bit-vector = 0x%03X\n", vector);
2703 
2704 	/*
2705 	 * The MTA is a register array of 128 32-bit registers. It is treated
2706 	 * like an array of 4096 bits.  We want to set bit
2707 	 * BitArray[vector_value]. So we figure out what register the bit is
2708 	 * in, read it, OR in the new bit, then write back the new value.  The
2709 	 * register is determined by the upper 7 bits of the vector value and
2710 	 * the bit within that register are determined by the lower 5 bits of
2711 	 * the value.
2712 	 */
2713 	vector_reg = (vector >> 5) & 0x7F;
2714 	vector_bit = vector & 0x1F;
2715 	hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
2716 }
2717 
/**
 *  ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
 *  @hw: pointer to hardware structure
 *  @mc_addr_list: the list of new multicast addresses
 *  @mc_addr_count: number of addresses
 *  @next: iterator function to walk the multicast address list
 *  @clear: flag, when set clears the table beforehand
 *
 *  When the clear flag is set, the given list replaces any existing list.
 *  Hashes the given addresses into the multicast table.
 **/
s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
				      u32 mc_addr_count, ixgbe_mc_addr_itr next,
				      bool clear)
{
	u32 i;
	u32 vmdq;

	DEBUGFUNC("ixgbe_update_mc_addr_list_generic");

	/*
	 * Set the new number of MC addresses that we are being requested to
	 * use.
	 */
	hw->addr_ctrl.num_mc_addrs = mc_addr_count;
	hw->addr_ctrl.mta_in_use = 0;

	/* Clear mta_shadow */
	if (clear) {
		DEBUGOUT(" Clearing MTA\n");
		memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
	}

	/* Update mta_shadow: hash each address into the shadow table */
	for (i = 0; i < mc_addr_count; i++) {
		DEBUGOUT(" Adding the multicast addresses:\n");
		ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
	}

	/* Enable mta: write the whole shadow table to hardware */
	for (i = 0; i < hw->mac.mcft_size; i++)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
				      hw->mac.mta_shadow[i]);

	/* Turn the hash filter on only when at least one entry is set */
	if (hw->addr_ctrl.mta_in_use > 0)
		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
				IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);

	DEBUGOUT("ixgbe_update_mc_addr_list_generic Complete\n");
	return IXGBE_SUCCESS;
}
2769 
2770 /**
2771  *  ixgbe_enable_mc_generic - Enable multicast address in RAR
2772  *  @hw: pointer to hardware structure
2773  *
2774  *  Enables multicast address in RAR and the use of the multicast hash table.
2775  **/
2776 s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
2777 {
2778 	struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2779 
2780 	DEBUGFUNC("ixgbe_enable_mc_generic");
2781 
2782 	if (a->mta_in_use > 0)
2783 		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
2784 				hw->mac.mc_filter_type);
2785 
2786 	return IXGBE_SUCCESS;
2787 }
2788 
2789 /**
2790  *  ixgbe_disable_mc_generic - Disable multicast address in RAR
2791  *  @hw: pointer to hardware structure
2792  *
2793  *  Disables multicast address in RAR and the use of the multicast hash table.
2794  **/
2795 s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
2796 {
2797 	struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2798 
2799 	DEBUGFUNC("ixgbe_disable_mc_generic");
2800 
2801 	if (a->mta_in_use > 0)
2802 		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
2803 
2804 	return IXGBE_SUCCESS;
2805 }
2806 
/**
 *  ixgbe_fc_enable_generic - Enable flow control
 *  @hw: pointer to hardware structure
 *
 *  Enable flow control according to the current settings.
 *
 *  Returns IXGBE_SUCCESS, IXGBE_ERR_INVALID_LINK_SETTINGS for a bad
 *  pause time or water-mark configuration, or IXGBE_ERR_CONFIG for an
 *  unknown fc.current_mode.
 **/
s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_SUCCESS;
	u32 mflcn_reg, fccfg_reg;
	u32 reg;
	u32 fcrtl, fcrth;
	int i;

	DEBUGFUNC("ixgbe_fc_enable_generic");

	/* Validate the water mark configuration */
	if (!hw->fc.pause_time) {
		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
		goto out;
	}

	/* Low water mark of zero causes XOFF floods */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
		    hw->fc.high_water[i]) {
			/* Low water must be non-zero and below high water */
			if (!hw->fc.low_water[i] ||
			    hw->fc.low_water[i] >= hw->fc.high_water[i]) {
				DEBUGOUT("Invalid water mark configuration\n");
				ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
				goto out;
			}
		}
	}

	/* Negotiate the fc mode to use */
	hw->mac.ops.fc_autoneg(hw);

	/* Disable any previous flow control settings */
	mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
	mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);

	fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
	fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);

	/*
	 * The possible values of fc.current_mode are:
	 * 0: Flow control is completely disabled
	 * 1: Rx flow control is enabled (we can receive pause frames,
	 *    but not send pause frames).
	 * 2: Tx flow control is enabled (we can send pause frames but
	 *    we do not support receiving pause frames).
	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
	 * other: Invalid.
	 */
	switch (hw->fc.current_mode) {
	case ixgbe_fc_none:
		/*
		 * Flow control is disabled by software override or autoneg.
		 * The code below will actually disable it in the HW.
		 */
		break;
	case ixgbe_fc_rx_pause:
		/*
		 * Rx Flow control is enabled and Tx Flow control is
		 * disabled by software override. Since there really
		 * isn't a way to advertise that we are capable of RX
		 * Pause ONLY, we will advertise that we support both
		 * symmetric and asymmetric Rx PAUSE.  Later, we will
		 * disable the adapter's ability to send PAUSE frames.
		 */
		mflcn_reg |= IXGBE_MFLCN_RFCE;
		break;
	case ixgbe_fc_tx_pause:
		/*
		 * Tx Flow control is enabled, and Rx Flow control is
		 * disabled by software override.
		 */
		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
		break;
	case ixgbe_fc_full:
		/* Flow control (both Rx and Tx) is enabled by SW override. */
		mflcn_reg |= IXGBE_MFLCN_RFCE;
		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
		break;
	default:
		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
			     "Flow control param set incorrectly\n");
		ret_val = IXGBE_ERR_CONFIG;
		goto out;
		break;	/* NOTE(review): unreachable after the goto above */
	}

	/* Set 802.3x based flow control settings. */
	/* DPF: discard pause frames instead of forwarding them to the host */
	mflcn_reg |= IXGBE_MFLCN_DPF;
	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);


	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
		    hw->fc.high_water[i]) {
			/* Water marks are in KB units; registers take bytes */
			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
			fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
		} else {
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
			/*
			 * In order to prevent Tx hangs when the internal Tx
			 * switch is enabled we must set the high water mark
			 * to the Rx packet buffer size - 24KB.  This allows
			 * the Tx switch to function even under heavy Rx
			 * workloads.
			 */
			fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576;
		}

		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
	}

	/* Configure pause time (2 TCs per register) */
	/* The multiplier replicates pause_time into both 16-bit halves */
	reg = hw->fc.pause_time * 0x00010001;
	for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);

	/* Configure flow control refresh threshold value */
	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);

out:
	return ret_val;
}
2939 
2940 /**
2941  *  ixgbe_negotiate_fc - Negotiate flow control
2942  *  @hw: pointer to hardware structure
2943  *  @adv_reg: flow control advertised settings
2944  *  @lp_reg: link partner's flow control settings
2945  *  @adv_sym: symmetric pause bit in advertisement
2946  *  @adv_asm: asymmetric pause bit in advertisement
2947  *  @lp_sym: symmetric pause bit in link partner advertisement
2948  *  @lp_asm: asymmetric pause bit in link partner advertisement
2949  *
2950  *  Find the intersection between advertised settings and link partner's
2951  *  advertised settings
2952  **/
2953 s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
2954 		       u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
2955 {
2956 	if ((!(adv_reg)) ||  (!(lp_reg))) {
2957 		ERROR_REPORT3(IXGBE_ERROR_UNSUPPORTED,
2958 			     "Local or link partner's advertised flow control "
2959 			     "settings are NULL. Local: %x, link partner: %x\n",
2960 			     adv_reg, lp_reg);
2961 		return IXGBE_ERR_FC_NOT_NEGOTIATED;
2962 	}
2963 
2964 	if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
2965 		/*
2966 		 * Now we need to check if the user selected Rx ONLY
2967 		 * of pause frames.  In this case, we had to advertise
2968 		 * FULL flow control because we could not advertise RX
2969 		 * ONLY. Hence, we must now check to see if we need to
2970 		 * turn OFF the TRANSMISSION of PAUSE frames.
2971 		 */
2972 		if (hw->fc.requested_mode == ixgbe_fc_full) {
2973 			hw->fc.current_mode = ixgbe_fc_full;
2974 			DEBUGOUT("Flow Control = FULL.\n");
2975 		} else {
2976 			hw->fc.current_mode = ixgbe_fc_rx_pause;
2977 			DEBUGOUT("Flow Control=RX PAUSE frames only\n");
2978 		}
2979 	} else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2980 		   (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2981 		hw->fc.current_mode = ixgbe_fc_tx_pause;
2982 		DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
2983 	} else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2984 		   !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2985 		hw->fc.current_mode = ixgbe_fc_rx_pause;
2986 		DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
2987 	} else {
2988 		hw->fc.current_mode = ixgbe_fc_none;
2989 		DEBUGOUT("Flow Control = NONE.\n");
2990 	}
2991 	return IXGBE_SUCCESS;
2992 }
2993 
2994 /**
2995  *  ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
2996  *  @hw: pointer to hardware structure
2997  *
2998  *  Enable flow control according on 1 gig fiber.
2999  **/
3000 static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
3001 {
3002 	u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
3003 	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
3004 
3005 	/*
3006 	 * On multispeed fiber at 1g, bail out if
3007 	 * - link is up but AN did not complete, or if
3008 	 * - link is up and AN completed but timed out
3009 	 */
3010 
3011 	linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
3012 	if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
3013 	    (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
3014 		DEBUGOUT("Auto-Negotiation did not complete or timed out\n");
3015 		goto out;
3016 	}
3017 
3018 	pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
3019 	pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
3020 
3021 	ret_val =  ixgbe_negotiate_fc(hw, pcs_anadv_reg,
3022 				      pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
3023 				      IXGBE_PCS1GANA_ASM_PAUSE,
3024 				      IXGBE_PCS1GANA_SYM_PAUSE,
3025 				      IXGBE_PCS1GANA_ASM_PAUSE);
3026 
3027 out:
3028 	return ret_val;
3029 }
3030 
3031 /**
3032  *  ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
3033  *  @hw: pointer to hardware structure
3034  *
3035  *  Enable flow control according to IEEE clause 37.
3036  **/
3037 static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
3038 {
3039 	u32 links2, anlp1_reg, autoc_reg, links;
3040 	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
3041 
3042 	/*
3043 	 * On backplane, bail out if
3044 	 * - backplane autoneg was not completed, or if
3045 	 * - we are 82599 and link partner is not AN enabled
3046 	 */
3047 	links = IXGBE_READ_REG(hw, IXGBE_LINKS);
3048 	if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
3049 		DEBUGOUT("Auto-Negotiation did not complete\n");
3050 		goto out;
3051 	}
3052 
3053 	if (hw->mac.type == ixgbe_mac_82599EB) {
3054 		links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
3055 		if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
3056 			DEBUGOUT("Link partner is not AN enabled\n");
3057 			goto out;
3058 		}
3059 	}
3060 	/*
3061 	 * Read the 10g AN autoc and LP ability registers and resolve
3062 	 * local flow control settings accordingly
3063 	 */
3064 	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
3065 	anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
3066 
3067 	ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
3068 		anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
3069 		IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);
3070 
3071 out:
3072 	return ret_val;
3073 }
3074 
3075 /**
3076  *  ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
3077  *  @hw: pointer to hardware structure
3078  *
3079  *  Enable flow control according to IEEE clause 37.
3080  **/
3081 static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
3082 {
3083 	u16 technology_ability_reg = 0;
3084 	u16 lp_technology_ability_reg = 0;
3085 
3086 	hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
3087 			     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3088 			     &technology_ability_reg);
3089 	hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP,
3090 			     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3091 			     &lp_technology_ability_reg);
3092 
3093 	return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
3094 				  (u32)lp_technology_ability_reg,
3095 				  IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
3096 				  IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
3097 }
3098 
3099 /**
3100  *  ixgbe_fc_autoneg - Configure flow control
3101  *  @hw: pointer to hardware structure
3102  *
3103  *  Compares our advertised flow control capabilities to those advertised by
3104  *  our link partner, and determines the proper flow control mode to use.
3105  **/
3106 void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
3107 {
3108 	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
3109 	ixgbe_link_speed speed;
3110 	bool link_up;
3111 
3112 	DEBUGFUNC("ixgbe_fc_autoneg");
3113 
3114 	/*
3115 	 * AN should have completed when the cable was plugged in.
3116 	 * Look for reasons to bail out.  Bail out if:
3117 	 * - FC autoneg is disabled, or if
3118 	 * - link is not up.
3119 	 */
3120 	if (hw->fc.disable_fc_autoneg) {
3121 		ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
3122 			     "Flow control autoneg is disabled");
3123 		goto out;
3124 	}
3125 
3126 	hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
3127 	if (!link_up) {
3128 		ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
3129 		goto out;
3130 	}
3131 
3132 	switch (hw->phy.media_type) {
3133 	/* Autoneg flow control on fiber adapters */
3134 	case ixgbe_media_type_fiber_fixed:
3135 	case ixgbe_media_type_fiber_qsfp:
3136 	case ixgbe_media_type_fiber:
3137 		if (speed == IXGBE_LINK_SPEED_1GB_FULL)
3138 			ret_val = ixgbe_fc_autoneg_fiber(hw);
3139 		break;
3140 
3141 	/* Autoneg flow control on backplane adapters */
3142 	case ixgbe_media_type_backplane:
3143 		ret_val = ixgbe_fc_autoneg_backplane(hw);
3144 		break;
3145 
3146 	/* Autoneg flow control on copper adapters */
3147 	case ixgbe_media_type_copper:
3148 		if (ixgbe_device_supports_autoneg_fc(hw))
3149 			ret_val = ixgbe_fc_autoneg_copper(hw);
3150 		break;
3151 
3152 	default:
3153 		break;
3154 	}
3155 
3156 out:
3157 	if (ret_val == IXGBE_SUCCESS) {
3158 		hw->fc.fc_was_autonegged = TRUE;
3159 	} else {
3160 		hw->fc.fc_was_autonegged = FALSE;
3161 		hw->fc.current_mode = hw->fc.requested_mode;
3162 	}
3163 }
3164 
3165 /*
3166  * ixgbe_pcie_timeout_poll - Return number of times to poll for completion
3167  * @hw: pointer to hardware structure
3168  *
3169  * System-wide timeout range is encoded in PCIe Device Control2 register.
3170  *
3171  * Add 10% to specified maximum and return the number of times to poll for
3172  * completion timeout, in units of 100 microsec.  Never return less than
3173  * 800 = 80 millisec.
3174  */
3175 static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw)
3176 {
3177 	s16 devctl2;
3178 	u32 pollcnt;
3179 
3180 	devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
3181 	devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK;
3182 
3183 	switch (devctl2) {
3184 	case IXGBE_PCIDEVCTRL2_65_130ms:
3185 		pollcnt = 1300;		/* 130 millisec */
3186 		break;
3187 	case IXGBE_PCIDEVCTRL2_260_520ms:
3188 		pollcnt = 5200;		/* 520 millisec */
3189 		break;
3190 	case IXGBE_PCIDEVCTRL2_1_2s:
3191 		pollcnt = 20000;	/* 2 sec */
3192 		break;
3193 	case IXGBE_PCIDEVCTRL2_4_8s:
3194 		pollcnt = 80000;	/* 8 sec */
3195 		break;
3196 	case IXGBE_PCIDEVCTRL2_17_34s:
3197 		pollcnt = 34000;	/* 34 sec */
3198 		break;
3199 	case IXGBE_PCIDEVCTRL2_50_100us:	/* 100 microsecs */
3200 	case IXGBE_PCIDEVCTRL2_1_2ms:		/* 2 millisecs */
3201 	case IXGBE_PCIDEVCTRL2_16_32ms:		/* 32 millisec */
3202 	case IXGBE_PCIDEVCTRL2_16_32ms_def:	/* 32 millisec default */
3203 	default:
3204 		pollcnt = 800;		/* 80 millisec minimum */
3205 		break;
3206 	}
3207 
3208 	/* add 10% to spec maximum */
3209 	return (pollcnt * 11) / 10;
3210 }
3211 
3212 /**
3213  *  ixgbe_disable_pcie_master - Disable PCI-express master access
3214  *  @hw: pointer to hardware structure
3215  *
3216  *  Disables PCI-Express master access and verifies there are no pending
3217  *  requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable
3218  *  bit hasn't caused the master requests to be disabled, else IXGBE_SUCCESS
3219  *  is returned signifying master requests disabled.
3220  **/
3221 s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
3222 {
3223 	s32 status = IXGBE_SUCCESS;
3224 	u32 i, poll;
3225 	u16 value;
3226 
3227 	DEBUGFUNC("ixgbe_disable_pcie_master");
3228 
3229 	/* Always set this bit to ensure any future transactions are blocked */
3230 	IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);
3231 
3232 	/* Exit if master requests are blocked */
3233 	if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) ||
3234 	    IXGBE_REMOVED(hw->hw_addr))
3235 		goto out;
3236 
3237 	/* Poll for master request bit to clear */
3238 	for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
3239 		usec_delay(100);
3240 		if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
3241 			goto out;
3242 	}
3243 
3244 	/*
3245 	 * Two consecutive resets are required via CTRL.RST per datasheet
3246 	 * 5.2.5.3.2 Master Disable.  We set a flag to inform the reset routine
3247 	 * of this need.  The first reset prevents new master requests from
3248 	 * being issued by our device.  We then must wait 1usec or more for any
3249 	 * remaining completions from the PCIe bus to trickle in, and then reset
3250 	 * again to clear out any effects they may have had on our device.
3251 	 */
3252 	DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n");
3253 	hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
3254 
3255 	if (hw->mac.type >= ixgbe_mac_X550)
3256 		goto out;
3257 
3258 	/*
3259 	 * Before proceeding, make sure that the PCIe block does not have
3260 	 * transactions pending.
3261 	 */
3262 	poll = ixgbe_pcie_timeout_poll(hw);
3263 	for (i = 0; i < poll; i++) {
3264 		usec_delay(100);
3265 		value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS);
3266 		if (IXGBE_REMOVED(hw->hw_addr))
3267 			goto out;
3268 		if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
3269 			goto out;
3270 	}
3271 
3272 	ERROR_REPORT1(IXGBE_ERROR_POLLING,
3273 		     "PCIe transaction pending bit also did not clear.\n");
3274 	status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
3275 
3276 out:
3277 	return status;
3278 }
3279 
3280 /**
3281  *  ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
3282  *  @hw: pointer to hardware structure
3283  *  @mask: Mask to specify which semaphore to acquire
3284  *
3285  *  Acquires the SWFW semaphore through the GSSR register for the specified
3286  *  function (CSR, PHY0, PHY1, EEPROM, Flash)
3287  **/
3288 s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask)
3289 {
3290 	u32 gssr = 0;
3291 	u32 swmask = mask;
3292 	u32 fwmask = mask << 5;
3293 	u32 timeout = 200;
3294 	u32 i;
3295 
3296 	DEBUGFUNC("ixgbe_acquire_swfw_sync");
3297 
3298 	for (i = 0; i < timeout; i++) {
3299 		/*
3300 		 * SW NVM semaphore bit is used for access to all
3301 		 * SW_FW_SYNC bits (not just NVM)
3302 		 */
3303 		if (ixgbe_get_eeprom_semaphore(hw))
3304 			return IXGBE_ERR_SWFW_SYNC;
3305 
3306 		gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
3307 		if (!(gssr & (fwmask | swmask))) {
3308 			gssr |= swmask;
3309 			IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
3310 			ixgbe_release_eeprom_semaphore(hw);
3311 			return IXGBE_SUCCESS;
3312 		} else {
3313 			/* Resource is currently in use by FW or SW */
3314 			ixgbe_release_eeprom_semaphore(hw);
3315 			msec_delay(5);
3316 		}
3317 	}
3318 
3319 	/* If time expired clear the bits holding the lock and retry */
3320 	if (gssr & (fwmask | swmask))
3321 		ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask));
3322 
3323 	msec_delay(5);
3324 	return IXGBE_ERR_SWFW_SYNC;
3325 }
3326 
3327 /**
3328  *  ixgbe_release_swfw_sync - Release SWFW semaphore
3329  *  @hw: pointer to hardware structure
3330  *  @mask: Mask to specify which semaphore to release
3331  *
3332  *  Releases the SWFW semaphore through the GSSR register for the specified
3333  *  function (CSR, PHY0, PHY1, EEPROM, Flash)
3334  **/
3335 void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask)
3336 {
3337 	u32 gssr;
3338 	u32 swmask = mask;
3339 
3340 	DEBUGFUNC("ixgbe_release_swfw_sync");
3341 
3342 	ixgbe_get_eeprom_semaphore(hw);
3343 
3344 	gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
3345 	gssr &= ~swmask;
3346 	IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
3347 
3348 	ixgbe_release_eeprom_semaphore(hw);
3349 }
3350 
3351 /**
3352  *  ixgbe_disable_sec_rx_path_generic - Stops the receive data path
3353  *  @hw: pointer to hardware structure
3354  *
3355  *  Stops the receive data path and waits for the HW to internally empty
3356  *  the Rx security block
3357  **/
3358 s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw)
3359 {
3360 #define IXGBE_MAX_SECRX_POLL 40
3361 
3362 	int i;
3363 	int secrxreg;
3364 
3365 	DEBUGFUNC("ixgbe_disable_sec_rx_path_generic");
3366 
3367 
3368 	secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
3369 	secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
3370 	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
3371 	for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
3372 		secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
3373 		if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
3374 			break;
3375 		else
3376 			/* Use interrupt-safe sleep just in case */
3377 			usec_delay(1000);
3378 	}
3379 
3380 	/* For informational purposes only */
3381 	if (i >= IXGBE_MAX_SECRX_POLL)
3382 		DEBUGOUT("Rx unit being enabled before security "
3383 			 "path fully disabled.  Continuing with init.\n");
3384 
3385 	return IXGBE_SUCCESS;
3386 }
3387 
3388 /**
3389  *  prot_autoc_read_generic - Hides MAC differences needed for AUTOC read
3390  *  @hw: pointer to hardware structure
3391  *  @locked: bool to indicate whether the SW/FW lock was taken
3392  *  @reg_val: Value we read from AUTOC
3393  *
3394  *  The default case requires no protection so just to the register read.
3395  */
3396 s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
3397 {
3398 	*locked = FALSE;
3399 	*reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
3400 	return IXGBE_SUCCESS;
3401 }
3402 
3403 /**
3404  * prot_autoc_write_generic - Hides MAC differences needed for AUTOC write
3405  * @hw: pointer to hardware structure
3406  * @reg_val: value to write to AUTOC
3407  * @locked: bool to indicate whether the SW/FW lock was already taken by
3408  *           previous read.
3409  *
3410  * The default case requires no protection so just to the register write.
3411  */
3412 s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked)
3413 {
3414 	UNREFERENCED_1PARAMETER(locked);
3415 
3416 	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val);
3417 	return IXGBE_SUCCESS;
3418 }
3419 
3420 /**
3421  *  ixgbe_enable_sec_rx_path_generic - Enables the receive data path
3422  *  @hw: pointer to hardware structure
3423  *
3424  *  Enables the receive data path.
3425  **/
3426 s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw)
3427 {
3428 	u32 secrxreg;
3429 
3430 	DEBUGFUNC("ixgbe_enable_sec_rx_path_generic");
3431 
3432 	secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
3433 	secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
3434 	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
3435 	IXGBE_WRITE_FLUSH(hw);
3436 
3437 	return IXGBE_SUCCESS;
3438 }
3439 
3440 /**
3441  *  ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit
3442  *  @hw: pointer to hardware structure
3443  *  @regval: register value to write to RXCTRL
3444  *
3445  *  Enables the Rx DMA unit
3446  **/
3447 s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
3448 {
3449 	DEBUGFUNC("ixgbe_enable_rx_dma_generic");
3450 
3451 	if (regval & IXGBE_RXCTRL_RXEN)
3452 		ixgbe_enable_rx(hw);
3453 	else
3454 		ixgbe_disable_rx(hw);
3455 
3456 	return IXGBE_SUCCESS;
3457 }
3458 
3459 /**
3460  *  ixgbe_blink_led_start_generic - Blink LED based on index.
3461  *  @hw: pointer to hardware structure
3462  *  @index: led number to blink
3463  **/
3464 s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
3465 {
3466 	ixgbe_link_speed speed = 0;
3467 	bool link_up = 0;
3468 	u32 autoc_reg = 0;
3469 	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
3470 	s32 ret_val = IXGBE_SUCCESS;
3471 	bool locked = FALSE;
3472 
3473 	DEBUGFUNC("ixgbe_blink_led_start_generic");
3474 
3475 	if (index > 3)
3476 		return IXGBE_ERR_PARAM;
3477 
3478 	/*
3479 	 * Link must be up to auto-blink the LEDs;
3480 	 * Force it if link is down.
3481 	 */
3482 	hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
3483 
3484 	if (!link_up) {
3485 		ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
3486 		if (ret_val != IXGBE_SUCCESS)
3487 			goto out;
3488 
3489 		autoc_reg |= IXGBE_AUTOC_AN_RESTART;
3490 		autoc_reg |= IXGBE_AUTOC_FLU;
3491 
3492 		ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
3493 		if (ret_val != IXGBE_SUCCESS)
3494 			goto out;
3495 
3496 		IXGBE_WRITE_FLUSH(hw);
3497 		msec_delay(10);
3498 	}
3499 
3500 	led_reg &= ~IXGBE_LED_MODE_MASK(index);
3501 	led_reg |= IXGBE_LED_BLINK(index);
3502 	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
3503 	IXGBE_WRITE_FLUSH(hw);
3504 
3505 out:
3506 	return ret_val;
3507 }
3508 
3509 /**
3510  *  ixgbe_blink_led_stop_generic - Stop blinking LED based on index.
3511  *  @hw: pointer to hardware structure
3512  *  @index: led number to stop blinking
3513  **/
3514 s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
3515 {
3516 	u32 autoc_reg = 0;
3517 	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
3518 	s32 ret_val = IXGBE_SUCCESS;
3519 	bool locked = FALSE;
3520 
3521 	DEBUGFUNC("ixgbe_blink_led_stop_generic");
3522 
3523 	if (index > 3)
3524 		return IXGBE_ERR_PARAM;
3525 
3526 	ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
3527 	if (ret_val != IXGBE_SUCCESS)
3528 		goto out;
3529 
3530 	autoc_reg &= ~IXGBE_AUTOC_FLU;
3531 	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
3532 
3533 	ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
3534 	if (ret_val != IXGBE_SUCCESS)
3535 		goto out;
3536 
3537 	led_reg &= ~IXGBE_LED_MODE_MASK(index);
3538 	led_reg &= ~IXGBE_LED_BLINK(index);
3539 	led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
3540 	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
3541 	IXGBE_WRITE_FLUSH(hw);
3542 
3543 out:
3544 	return ret_val;
3545 }
3546 
3547 /**
3548  *  ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM
3549  *  @hw: pointer to hardware structure
3550  *  @san_mac_offset: SAN MAC address offset
3551  *
3552  *  This function will read the EEPROM location for the SAN MAC address
3553  *  pointer, and returns the value at that location.  This is used in both
3554  *  get and set mac_addr routines.
3555  **/
3556 static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
3557 					 u16 *san_mac_offset)
3558 {
3559 	s32 ret_val;
3560 
3561 	DEBUGFUNC("ixgbe_get_san_mac_addr_offset");
3562 
3563 	/*
3564 	 * First read the EEPROM pointer to see if the MAC addresses are
3565 	 * available.
3566 	 */
3567 	ret_val = hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR,
3568 				      san_mac_offset);
3569 	if (ret_val) {
3570 		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
3571 			      "eeprom at offset %d failed",
3572 			      IXGBE_SAN_MAC_ADDR_PTR);
3573 	}
3574 
3575 	return ret_val;
3576 }
3577 
3578 /**
3579  *  ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM
3580  *  @hw: pointer to hardware structure
3581  *  @san_mac_addr: SAN MAC address
3582  *
3583  *  Reads the SAN MAC address from the EEPROM, if it's available.  This is
3584  *  per-port, so set_lan_id() must be called before reading the addresses.
3585  *  set_lan_id() is called by identify_sfp(), but this cannot be relied
3586  *  upon for non-SFP connections, so we must call it here.
3587  **/
3588 s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
3589 {
3590 	u16 san_mac_data, san_mac_offset;
3591 	u8 i;
3592 	s32 ret_val;
3593 
3594 	DEBUGFUNC("ixgbe_get_san_mac_addr_generic");
3595 
3596 	/*
3597 	 * First read the EEPROM pointer to see if the MAC addresses are
3598 	 * available.  If they're not, no point in calling set_lan_id() here.
3599 	 */
3600 	ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
3601 	if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
3602 		goto san_mac_addr_out;
3603 
3604 	/* make sure we know which port we need to program */
3605 	hw->mac.ops.set_lan_id(hw);
3606 	/* apply the port offset to the address offset */
3607 	(hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
3608 			 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
3609 	for (i = 0; i < 3; i++) {
3610 		ret_val = hw->eeprom.ops.read(hw, san_mac_offset,
3611 					      &san_mac_data);
3612 		if (ret_val) {
3613 			ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
3614 				      "eeprom read at offset %d failed",
3615 				      san_mac_offset);
3616 			goto san_mac_addr_out;
3617 		}
3618 		san_mac_addr[i * 2] = (u8)(san_mac_data);
3619 		san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
3620 		san_mac_offset++;
3621 	}
3622 	return IXGBE_SUCCESS;
3623 
3624 san_mac_addr_out:
3625 	/*
3626 	 * No addresses available in this EEPROM.  It's not an
3627 	 * error though, so just wipe the local address and return.
3628 	 */
3629 	for (i = 0; i < 6; i++)
3630 		san_mac_addr[i] = 0xFF;
3631 	return IXGBE_SUCCESS;
3632 }
3633 
3634 /**
3635  *  ixgbe_set_san_mac_addr_generic - Write the SAN MAC address to the EEPROM
3636  *  @hw: pointer to hardware structure
3637  *  @san_mac_addr: SAN MAC address
3638  *
3639  *  Write a SAN MAC address to the EEPROM.
3640  **/
3641 s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
3642 {
3643 	s32 ret_val;
3644 	u16 san_mac_data, san_mac_offset;
3645 	u8 i;
3646 
3647 	DEBUGFUNC("ixgbe_set_san_mac_addr_generic");
3648 
3649 	/* Look for SAN mac address pointer.  If not defined, return */
3650 	ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
3651 	if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
3652 		return IXGBE_ERR_NO_SAN_ADDR_PTR;
3653 
3654 	/* Make sure we know which port we need to write */
3655 	hw->mac.ops.set_lan_id(hw);
3656 	/* Apply the port offset to the address offset */
3657 	(hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
3658 			 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
3659 
3660 	for (i = 0; i < 3; i++) {
3661 		san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8);
3662 		san_mac_data |= (u16)(san_mac_addr[i * 2]);
3663 		hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data);
3664 		san_mac_offset++;
3665 	}
3666 
3667 	return IXGBE_SUCCESS;
3668 }
3669 
3670 /**
3671  *  ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count
3672  *  @hw: pointer to hardware structure
3673  *
3674  *  Read PCIe configuration space, and get the MSI-X vector count from
3675  *  the capabilities table.
3676  **/
3677 u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
3678 {
3679 	u16 msix_count = 1;
3680 	u16 max_msix_count;
3681 	u16 pcie_offset;
3682 
3683 	switch (hw->mac.type) {
3684 	case ixgbe_mac_82598EB:
3685 		pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS;
3686 		max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598;
3687 		break;
3688 	case ixgbe_mac_82599EB:
3689 	case ixgbe_mac_X540:
3690 	case ixgbe_mac_X550:
3691 	case ixgbe_mac_X550EM_x:
3692 	case ixgbe_mac_X550EM_a:
3693 		pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
3694 		max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
3695 		break;
3696 	default:
3697 		return msix_count;
3698 	}
3699 
3700 	DEBUGFUNC("ixgbe_get_pcie_msix_count_generic");
3701 	msix_count = IXGBE_READ_PCIE_WORD(hw, pcie_offset);
3702 	if (IXGBE_REMOVED(hw->hw_addr))
3703 		msix_count = 0;
3704 	msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
3705 
3706 	/* MSI-X count is zero-based in HW */
3707 	msix_count++;
3708 
3709 	if (msix_count > max_msix_count)
3710 		msix_count = max_msix_count;
3711 
3712 	return msix_count;
3713 }
3714 
3715 /**
3716  *  ixgbe_insert_mac_addr_generic - Find a RAR for this mac address
3717  *  @hw: pointer to hardware structure
3718  *  @addr: Address to put into receive address register
3719  *  @vmdq: VMDq pool to assign
3720  *
3721  *  Puts an ethernet address into a receive address register, or
3722  *  finds the rar that it is already in; adds to the pool list
3723  **/
3724 s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
3725 {
3726 	static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF;
3727 	u32 first_empty_rar = NO_EMPTY_RAR_FOUND;
3728 	u32 rar;
3729 	u32 rar_low, rar_high;
3730 	u32 addr_low, addr_high;
3731 
3732 	DEBUGFUNC("ixgbe_insert_mac_addr_generic");
3733 
3734 	/* swap bytes for HW little endian */
3735 	addr_low  = addr[0] | (addr[1] << 8)
3736 			    | (addr[2] << 16)
3737 			    | (addr[3] << 24);
3738 	addr_high = addr[4] | (addr[5] << 8);
3739 
3740 	/*
3741 	 * Either find the mac_id in rar or find the first empty space.
3742 	 * rar_highwater points to just after the highest currently used
3743 	 * rar in order to shorten the search.  It grows when we add a new
3744 	 * rar to the top.
3745 	 */
3746 	for (rar = 0; rar < hw->mac.rar_highwater; rar++) {
3747 		rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
3748 
3749 		if (((IXGBE_RAH_AV & rar_high) == 0)
3750 		    && first_empty_rar == NO_EMPTY_RAR_FOUND) {
3751 			first_empty_rar = rar;
3752 		} else if ((rar_high & 0xFFFF) == addr_high) {
3753 			rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar));
3754 			if (rar_low == addr_low)
3755 				break;    /* found it already in the rars */
3756 		}
3757 	}
3758 
3759 	if (rar < hw->mac.rar_highwater) {
3760 		/* already there so just add to the pool bits */
3761 		ixgbe_set_vmdq(hw, rar, vmdq);
3762 	} else if (first_empty_rar != NO_EMPTY_RAR_FOUND) {
3763 		/* stick it into first empty RAR slot we found */
3764 		rar = first_empty_rar;
3765 		ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
3766 	} else if (rar == hw->mac.rar_highwater) {
3767 		/* add it to the top of the list and inc the highwater mark */
3768 		ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
3769 		hw->mac.rar_highwater++;
3770 	} else if (rar >= hw->mac.num_rar_entries) {
3771 		return IXGBE_ERR_INVALID_MAC_ADDR;
3772 	}
3773 
3774 	/*
3775 	 * If we found rar[0], make sure the default pool bit (we use pool 0)
3776 	 * remains cleared to be sure default pool packets will get delivered
3777 	 */
3778 	if (rar == 0)
3779 		ixgbe_clear_vmdq(hw, rar, 0);
3780 
3781 	return rar;
3782 }
3783 
3784 /**
3785  *  ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
3786  *  @hw: pointer to hardware struct
3787  *  @rar: receive address register index to disassociate
3788  *  @vmdq: VMDq pool index to remove from the rar
3789  **/
3790 s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
3791 {
3792 	u32 mpsar_lo, mpsar_hi;
3793 	u32 rar_entries = hw->mac.num_rar_entries;
3794 
3795 	DEBUGFUNC("ixgbe_clear_vmdq_generic");
3796 
3797 	/* Make sure we are using a valid rar index range */
3798 	if (rar >= rar_entries) {
3799 		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
3800 			     "RAR index %d is out of range.\n", rar);
3801 		return IXGBE_ERR_INVALID_ARGUMENT;
3802 	}
3803 
3804 	mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3805 	mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3806 
3807 	if (IXGBE_REMOVED(hw->hw_addr))
3808 		goto done;
3809 
3810 	if (!mpsar_lo && !mpsar_hi)
3811 		goto done;
3812 
3813 	if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
3814 		if (mpsar_lo) {
3815 			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
3816 			mpsar_lo = 0;
3817 		}
3818 		if (mpsar_hi) {
3819 			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
3820 			mpsar_hi = 0;
3821 		}
3822 	} else if (vmdq < 32) {
3823 		mpsar_lo &= ~(1 << vmdq);
3824 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
3825 	} else {
3826 		mpsar_hi &= ~(1 << (vmdq - 32));
3827 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
3828 	}
3829 
3830 	/* was that the last pool using this rar? */
3831 	if (mpsar_lo == 0 && mpsar_hi == 0 &&
3832 	    rar != 0 && rar != hw->mac.san_mac_rar_index)
3833 		hw->mac.ops.clear_rar(hw, rar);
3834 done:
3835 	return IXGBE_SUCCESS;
3836 }
3837 
3838 /**
3839  *  ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address
3840  *  @hw: pointer to hardware struct
3841  *  @rar: receive address register index to associate with a VMDq index
3842  *  @vmdq: VMDq pool index
3843  **/
3844 s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
3845 {
3846 	u32 mpsar;
3847 	u32 rar_entries = hw->mac.num_rar_entries;
3848 
3849 	DEBUGFUNC("ixgbe_set_vmdq_generic");
3850 
3851 	/* Make sure we are using a valid rar index range */
3852 	if (rar >= rar_entries) {
3853 		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
3854 			     "RAR index %d is out of range.\n", rar);
3855 		return IXGBE_ERR_INVALID_ARGUMENT;
3856 	}
3857 
3858 	if (vmdq < 32) {
3859 		mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3860 		mpsar |= 1 << vmdq;
3861 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
3862 	} else {
3863 		mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3864 		mpsar |= 1 << (vmdq - 32);
3865 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
3866 	}
3867 	return IXGBE_SUCCESS;
3868 }
3869 
3870 /**
3871  *  This function should only be involved in the IOV mode.
3872  *  In IOV mode, Default pool is next pool after the number of
3873  *  VFs advertized and not 0.
3874  *  MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index]
3875  *
3876  *  ixgbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address
3877  *  @hw: pointer to hardware struct
3878  *  @vmdq: VMDq pool index
3879  **/
3880 s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
3881 {
3882 	u32 rar = hw->mac.san_mac_rar_index;
3883 
3884 	DEBUGFUNC("ixgbe_set_vmdq_san_mac");
3885 
3886 	if (vmdq < 32) {
3887 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq);
3888 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
3889 	} else {
3890 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
3891 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32));
3892 	}
3893 
3894 	return IXGBE_SUCCESS;
3895 }
3896 
3897 /**
3898  *  ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
3899  *  @hw: pointer to hardware structure
3900  **/
3901 s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
3902 {
3903 	int i;
3904 
3905 	DEBUGFUNC("ixgbe_init_uta_tables_generic");
3906 	DEBUGOUT(" Clearing UTA\n");
3907 
3908 	for (i = 0; i < 128; i++)
3909 		IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
3910 
3911 	return IXGBE_SUCCESS;
3912 }
3913 
3914 /**
3915  *  ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
3916  *  @hw: pointer to hardware structure
3917  *  @vlan: VLAN id to write to VLAN filter
3918  *  @vlvf_bypass: TRUE to find vlanid only, FALSE returns first empty slot if
3919  *		  vlanid not found
3920  *
3921  *
3922  *  return the VLVF index where this VLAN id should be placed
3923  *
3924  **/
3925 s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass)
3926 {
3927 	s32 regindex, first_empty_slot;
3928 	u32 bits;
3929 
3930 	/* short cut the special case */
3931 	if (vlan == 0)
3932 		return 0;
3933 
3934 	/* if vlvf_bypass is set we don't want to use an empty slot, we
3935 	 * will simply bypass the VLVF if there are no entries present in the
3936 	 * VLVF that contain our VLAN
3937 	 */
3938 	first_empty_slot = vlvf_bypass ? IXGBE_ERR_NO_SPACE : 0;
3939 
3940 	/* add VLAN enable bit for comparison */
3941 	vlan |= IXGBE_VLVF_VIEN;
3942 
3943 	/* Search for the vlan id in the VLVF entries. Save off the first empty
3944 	 * slot found along the way.
3945 	 *
3946 	 * pre-decrement loop covering (IXGBE_VLVF_ENTRIES - 1) .. 1
3947 	 */
3948 	for (regindex = IXGBE_VLVF_ENTRIES; --regindex;) {
3949 		bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
3950 		if (bits == vlan)
3951 			return regindex;
3952 		if (!first_empty_slot && !bits)
3953 			first_empty_slot = regindex;
3954 	}
3955 
3956 	/* If we are here then we didn't find the VLAN.  Return first empty
3957 	 * slot we found during our search, else error.
3958 	 */
3959 	if (!first_empty_slot)
3960 		ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "No space in VLVF.\n");
3961 
3962 	return first_empty_slot ? first_empty_slot : IXGBE_ERR_NO_SPACE;
3963 }
3964 
3965 /**
3966  *  ixgbe_set_vfta_generic - Set VLAN filter table
3967  *  @hw: pointer to hardware structure
3968  *  @vlan: VLAN id to write to VLAN filter
3969  *  @vind: VMDq output index that maps queue to VLAN id in VLVFB
3970  *  @vlan_on: boolean flag to turn on/off VLAN
3971  *  @vlvf_bypass: boolean flag indicating updating default pool is okay
3972  *
3973  *  Turn on/off specified VLAN in the VLAN filter table.
3974  **/
3975 s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
3976 			   bool vlan_on, bool vlvf_bypass)
3977 {
3978 	u32 regidx, vfta_delta, vfta;
3979 	s32 ret_val;
3980 
3981 	DEBUGFUNC("ixgbe_set_vfta_generic");
3982 
3983 	if (vlan > 4095 || vind > 63)
3984 		return IXGBE_ERR_PARAM;
3985 
3986 	/*
3987 	 * this is a 2 part operation - first the VFTA, then the
3988 	 * VLVF and VLVFB if VT Mode is set
3989 	 * We don't write the VFTA until we know the VLVF part succeeded.
3990 	 */
3991 
3992 	/* Part 1
3993 	 * The VFTA is a bitstring made up of 128 32-bit registers
3994 	 * that enable the particular VLAN id, much like the MTA:
3995 	 *    bits[11-5]: which register
3996 	 *    bits[4-0]:  which bit in the register
3997 	 */
3998 	regidx = vlan / 32;
3999 	vfta_delta = 1 << (vlan % 32);
4000 	vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regidx));
4001 
4002 	/*
4003 	 * vfta_delta represents the difference between the current value
4004 	 * of vfta and the value we want in the register.  Since the diff
4005 	 * is an XOR mask we can just update the vfta using an XOR
4006 	 */
4007 	vfta_delta &= vlan_on ? ~vfta : vfta;
4008 	vfta ^= vfta_delta;
4009 
4010 	/* Part 2
4011 	 * Call ixgbe_set_vlvf_generic to set VLVFB and VLVF
4012 	 */
4013 	ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on, &vfta_delta,
4014 					 vfta, vlvf_bypass);
4015 	if (ret_val != IXGBE_SUCCESS) {
4016 		if (vlvf_bypass)
4017 			goto vfta_update;
4018 		return ret_val;
4019 	}
4020 
4021 vfta_update:
4022 	/* Update VFTA now that we are ready for traffic */
4023 	if (vfta_delta)
4024 		IXGBE_WRITE_REG(hw, IXGBE_VFTA(regidx), vfta);
4025 
4026 	return IXGBE_SUCCESS;
4027 }
4028 
4029 /**
4030  *  ixgbe_set_vlvf_generic - Set VLAN Pool Filter
4031  *  @hw: pointer to hardware structure
4032  *  @vlan: VLAN id to write to VLAN filter
4033  *  @vind: VMDq output index that maps queue to VLAN id in VLVFB
4034  *  @vlan_on: boolean flag to turn on/off VLAN in VLVF
4035  *  @vfta_delta: pointer to the difference between the current value of VFTA
4036  *		 and the desired value
4037  *  @vfta: the desired value of the VFTA
4038  *  @vlvf_bypass: boolean flag indicating updating default pool is okay
4039  *
4040  *  Turn on/off specified bit in VLVF table.
4041  **/
4042 s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
4043 			   bool vlan_on, u32 *vfta_delta, u32 vfta,
4044 			   bool vlvf_bypass)
4045 {
4046 	u32 bits;
4047 	s32 vlvf_index;
4048 
4049 	DEBUGFUNC("ixgbe_set_vlvf_generic");
4050 
4051 	if (vlan > 4095 || vind > 63)
4052 		return IXGBE_ERR_PARAM;
4053 
4054 	/* If VT Mode is set
4055 	 *   Either vlan_on
4056 	 *     make sure the vlan is in VLVF
4057 	 *     set the vind bit in the matching VLVFB
4058 	 *   Or !vlan_on
4059 	 *     clear the pool bit and possibly the vind
4060 	 */
4061 	if (!(IXGBE_READ_REG(hw, IXGBE_VT_CTL) & IXGBE_VT_CTL_VT_ENABLE))
4062 		return IXGBE_SUCCESS;
4063 
4064 	vlvf_index = ixgbe_find_vlvf_slot(hw, vlan, vlvf_bypass);
4065 	if (vlvf_index < 0)
4066 		return vlvf_index;
4067 
4068 	bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32));
4069 
4070 	/* set the pool bit */
4071 	bits |= 1 << (vind % 32);
4072 	if (vlan_on)
4073 		goto vlvf_update;
4074 
4075 	/* clear the pool bit */
4076 	bits ^= 1 << (vind % 32);
4077 
4078 	if (!bits &&
4079 	    !IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + 1 - vind / 32))) {
4080 		/* Clear VFTA first, then disable VLVF.  Otherwise
4081 		 * we run the risk of stray packets leaking into
4082 		 * the PF via the default pool
4083 		 */
4084 		if (*vfta_delta)
4085 			IXGBE_WRITE_REG(hw, IXGBE_VFTA(vlan / 32), vfta);
4086 
4087 		/* disable VLVF and clear remaining bit from pool */
4088 		IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
4089 		IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), 0);
4090 
4091 		return IXGBE_SUCCESS;
4092 	}
4093 
4094 	/* If there are still bits set in the VLVFB registers
4095 	 * for the VLAN ID indicated we need to see if the
4096 	 * caller is requesting that we clear the VFTA entry bit.
4097 	 * If the caller has requested that we clear the VFTA
4098 	 * entry bit but there are still pools/VFs using this VLAN
4099 	 * ID entry then ignore the request.  We're not worried
4100 	 * about the case where we're turning the VFTA VLAN ID
4101 	 * entry bit on, only when requested to turn it off as
4102 	 * there may be multiple pools and/or VFs using the
4103 	 * VLAN ID entry.  In that case we cannot clear the
4104 	 * VFTA bit until all pools/VFs using that VLAN ID have also
4105 	 * been cleared.  This will be indicated by "bits" being
4106 	 * zero.
4107 	 */
4108 	*vfta_delta = 0;
4109 
4110 vlvf_update:
4111 	/* record pool change and enable VLAN ID if not already enabled */
4112 	IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), bits);
4113 	IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), IXGBE_VLVF_VIEN | vlan);
4114 
4115 	return IXGBE_SUCCESS;
4116 }
4117 
4118 /**
4119  *  ixgbe_clear_vfta_generic - Clear VLAN filter table
4120  *  @hw: pointer to hardware structure
4121  *
4122  *  Clears the VLAN filer table, and the VMDq index associated with the filter
4123  **/
4124 s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
4125 {
4126 	u32 offset;
4127 
4128 	DEBUGFUNC("ixgbe_clear_vfta_generic");
4129 
4130 	for (offset = 0; offset < hw->mac.vft_size; offset++)
4131 		IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
4132 
4133 	for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
4134 		IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
4135 		IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0);
4136 		IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset * 2) + 1), 0);
4137 	}
4138 
4139 	return IXGBE_SUCCESS;
4140 }
4141 
4142 /**
4143  *  ixgbe_need_crosstalk_fix - Determine if we need to do cross talk fix
4144  *  @hw: pointer to hardware structure
4145  *
4146  *  Contains the logic to identify if we need to verify link for the
4147  *  crosstalk fix
4148  **/
4149 static bool ixgbe_need_crosstalk_fix(struct ixgbe_hw *hw)
4150 {
4151 
4152 	/* Does FW say we need the fix */
4153 	if (!hw->need_crosstalk_fix)
4154 		return FALSE;
4155 
4156 	/* Only consider SFP+ PHYs i.e. media type fiber */
4157 	switch (hw->mac.ops.get_media_type(hw)) {
4158 	case ixgbe_media_type_fiber:
4159 	case ixgbe_media_type_fiber_qsfp:
4160 		break;
4161 	default:
4162 		return FALSE;
4163 	}
4164 
4165 	return TRUE;
4166 }
4167 
4168 /**
4169  *  ixgbe_check_mac_link_generic - Determine link and speed status
4170  *  @hw: pointer to hardware structure
4171  *  @speed: pointer to link speed
4172  *  @link_up: TRUE when link is up
4173  *  @link_up_wait_to_complete: bool used to wait for link up or not
4174  *
4175  *  Reads the links register to determine if link is up and the current speed
4176  **/
4177 s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
4178 				 bool *link_up, bool link_up_wait_to_complete)
4179 {
4180 	u32 links_reg, links_orig;
4181 	u32 i;
4182 
4183 	DEBUGFUNC("ixgbe_check_mac_link_generic");
4184 
4185 	/* If Crosstalk fix enabled do the sanity check of making sure
4186 	 * the SFP+ cage is full.
4187 	 */
4188 	if (ixgbe_need_crosstalk_fix(hw)) {
4189 		u32 sfp_cage_full;
4190 
4191 		switch (hw->mac.type) {
4192 		case ixgbe_mac_82599EB:
4193 			sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
4194 					IXGBE_ESDP_SDP2;
4195 			break;
4196 		case ixgbe_mac_X550EM_x:
4197 		case ixgbe_mac_X550EM_a:
4198 			sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
4199 					IXGBE_ESDP_SDP0;
4200 			break;
4201 		default:
4202 			/* sanity check - No SFP+ devices here */
4203 			sfp_cage_full = FALSE;
4204 			break;
4205 		}
4206 
4207 		if (!sfp_cage_full) {
4208 			*link_up = FALSE;
4209 			*speed = IXGBE_LINK_SPEED_UNKNOWN;
4210 			return IXGBE_SUCCESS;
4211 		}
4212 	}
4213 
4214 	/* clear the old state */
4215 	links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);
4216 
4217 	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
4218 
4219 	if (links_orig != links_reg) {
4220 		DEBUGOUT2("LINKS changed from %08X to %08X\n",
4221 			  links_orig, links_reg);
4222 	}
4223 
4224 	if (link_up_wait_to_complete) {
4225 		for (i = 0; i < hw->mac.max_link_up_time; i++) {
4226 			if (links_reg & IXGBE_LINKS_UP) {
4227 				*link_up = TRUE;
4228 				break;
4229 			} else {
4230 				*link_up = FALSE;
4231 			}
4232 			msec_delay(100);
4233 			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
4234 		}
4235 	} else {
4236 		if (links_reg & IXGBE_LINKS_UP)
4237 			*link_up = TRUE;
4238 		else
4239 			*link_up = FALSE;
4240 	}
4241 
4242 	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
4243 	case IXGBE_LINKS_SPEED_10G_82599:
4244 		*speed = IXGBE_LINK_SPEED_10GB_FULL;
4245 		if (hw->mac.type >= ixgbe_mac_X550) {
4246 			if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
4247 				*speed = IXGBE_LINK_SPEED_2_5GB_FULL;
4248 		}
4249 		break;
4250 	case IXGBE_LINKS_SPEED_1G_82599:
4251 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
4252 		break;
4253 	case IXGBE_LINKS_SPEED_100_82599:
4254 		*speed = IXGBE_LINK_SPEED_100_FULL;
4255 		if (hw->mac.type == ixgbe_mac_X550) {
4256 			if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
4257 				*speed = IXGBE_LINK_SPEED_5GB_FULL;
4258 		}
4259 		break;
4260 	case IXGBE_LINKS_SPEED_10_X550EM_A:
4261 		*speed = IXGBE_LINK_SPEED_UNKNOWN;
4262 		if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
4263 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)
4264 			*speed = IXGBE_LINK_SPEED_10_FULL;
4265 		break;
4266 	default:
4267 		*speed = IXGBE_LINK_SPEED_UNKNOWN;
4268 	}
4269 
4270 	return IXGBE_SUCCESS;
4271 }
4272 
4273 /**
4274  *  ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
4275  *  the EEPROM
4276  *  @hw: pointer to hardware structure
4277  *  @wwnn_prefix: the alternative WWNN prefix
4278  *  @wwpn_prefix: the alternative WWPN prefix
4279  *
4280  *  This function will read the EEPROM from the alternative SAN MAC address
4281  *  block to check the support for the alternative WWNN/WWPN prefix support.
4282  **/
4283 s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
4284 				 u16 *wwpn_prefix)
4285 {
4286 	u16 offset, caps;
4287 	u16 alt_san_mac_blk_offset;
4288 
4289 	DEBUGFUNC("ixgbe_get_wwn_prefix_generic");
4290 
4291 	/* clear output first */
4292 	*wwnn_prefix = 0xFFFF;
4293 	*wwpn_prefix = 0xFFFF;
4294 
4295 	/* check if alternative SAN MAC is supported */
4296 	offset = IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR;
4297 	if (hw->eeprom.ops.read(hw, offset, &alt_san_mac_blk_offset))
4298 		goto wwn_prefix_err;
4299 
4300 	if ((alt_san_mac_blk_offset == 0) ||
4301 	    (alt_san_mac_blk_offset == 0xFFFF))
4302 		goto wwn_prefix_out;
4303 
4304 	/* check capability in alternative san mac address block */
4305 	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
4306 	if (hw->eeprom.ops.read(hw, offset, &caps))
4307 		goto wwn_prefix_err;
4308 	if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
4309 		goto wwn_prefix_out;
4310 
4311 	/* get the corresponding prefix for WWNN/WWPN */
4312 	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
4313 	if (hw->eeprom.ops.read(hw, offset, wwnn_prefix)) {
4314 		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
4315 			      "eeprom read at offset %d failed", offset);
4316 	}
4317 
4318 	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
4319 	if (hw->eeprom.ops.read(hw, offset, wwpn_prefix))
4320 		goto wwn_prefix_err;
4321 
4322 wwn_prefix_out:
4323 	return IXGBE_SUCCESS;
4324 
4325 wwn_prefix_err:
4326 	ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
4327 		      "eeprom read at offset %d failed", offset);
4328 	return IXGBE_SUCCESS;
4329 }
4330 
4331 /**
4332  *  ixgbe_get_fcoe_boot_status_generic - Get FCOE boot status from EEPROM
4333  *  @hw: pointer to hardware structure
4334  *  @bs: the fcoe boot status
4335  *
4336  *  This function will read the FCOE boot status from the iSCSI FCOE block
4337  **/
4338 s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs)
4339 {
4340 	u16 offset, caps, flags;
4341 	s32 status;
4342 
4343 	DEBUGFUNC("ixgbe_get_fcoe_boot_status_generic");
4344 
4345 	/* clear output first */
4346 	*bs = ixgbe_fcoe_bootstatus_unavailable;
4347 
4348 	/* check if FCOE IBA block is present */
4349 	offset = IXGBE_FCOE_IBA_CAPS_BLK_PTR;
4350 	status = hw->eeprom.ops.read(hw, offset, &caps);
4351 	if (status != IXGBE_SUCCESS)
4352 		goto out;
4353 
4354 	if (!(caps & IXGBE_FCOE_IBA_CAPS_FCOE))
4355 		goto out;
4356 
4357 	/* check if iSCSI FCOE block is populated */
4358 	status = hw->eeprom.ops.read(hw, IXGBE_ISCSI_FCOE_BLK_PTR, &offset);
4359 	if (status != IXGBE_SUCCESS)
4360 		goto out;
4361 
4362 	if ((offset == 0) || (offset == 0xFFFF))
4363 		goto out;
4364 
4365 	/* read fcoe flags in iSCSI FCOE block */
4366 	offset = offset + IXGBE_ISCSI_FCOE_FLAGS_OFFSET;
4367 	status = hw->eeprom.ops.read(hw, offset, &flags);
4368 	if (status != IXGBE_SUCCESS)
4369 		goto out;
4370 
4371 	if (flags & IXGBE_ISCSI_FCOE_FLAGS_ENABLE)
4372 		*bs = ixgbe_fcoe_bootstatus_enabled;
4373 	else
4374 		*bs = ixgbe_fcoe_bootstatus_disabled;
4375 
4376 out:
4377 	return status;
4378 }
4379 
4380 /**
4381  *  ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
4382  *  @hw: pointer to hardware structure
4383  *  @enable: enable or disable switch for MAC anti-spoofing
4384  *  @vf: Virtual Function pool - VF Pool to set for MAC anti-spoofing
4385  *
4386  **/
4387 void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
4388 {
4389 	int vf_target_reg = vf >> 3;
4390 	int vf_target_shift = vf % 8;
4391 	u32 pfvfspoof;
4392 
4393 	if (hw->mac.type == ixgbe_mac_82598EB)
4394 		return;
4395 
4396 	pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
4397 	if (enable)
4398 		pfvfspoof |= (1 << vf_target_shift);
4399 	else
4400 		pfvfspoof &= ~(1 << vf_target_shift);
4401 	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
4402 }
4403 
4404 /**
4405  *  ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
4406  *  @hw: pointer to hardware structure
4407  *  @enable: enable or disable switch for VLAN anti-spoofing
4408  *  @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
4409  *
4410  **/
4411 void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
4412 {
4413 	int vf_target_reg = vf >> 3;
4414 	int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT;
4415 	u32 pfvfspoof;
4416 
4417 	if (hw->mac.type == ixgbe_mac_82598EB)
4418 		return;
4419 
4420 	pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
4421 	if (enable)
4422 		pfvfspoof |= (1 << vf_target_shift);
4423 	else
4424 		pfvfspoof &= ~(1 << vf_target_shift);
4425 	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
4426 }
4427 
4428 /**
4429  *  ixgbe_get_device_caps_generic - Get additional device capabilities
4430  *  @hw: pointer to hardware structure
4431  *  @device_caps: the EEPROM word with the extra device capabilities
4432  *
4433  *  This function will read the EEPROM location for the device capabilities,
4434  *  and return the word through device_caps.
4435  **/
4436 s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps)
4437 {
4438 	DEBUGFUNC("ixgbe_get_device_caps_generic");
4439 
4440 	hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
4441 
4442 	return IXGBE_SUCCESS;
4443 }
4444 
4445 /**
4446  *  ixgbe_enable_relaxed_ordering_gen2 - Enable relaxed ordering
4447  *  @hw: pointer to hardware structure
4448  *
4449  **/
4450 void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw)
4451 {
4452 	u32 regval;
4453 	u32 i;
4454 
4455 	DEBUGFUNC("ixgbe_enable_relaxed_ordering_gen2");
4456 
4457 	/* Enable relaxed ordering */
4458 	for (i = 0; i < hw->mac.max_tx_queues; i++) {
4459 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
4460 		regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
4461 		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
4462 	}
4463 
4464 	for (i = 0; i < hw->mac.max_rx_queues; i++) {
4465 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
4466 		regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
4467 			  IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
4468 		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
4469 	}
4470 
4471 }
4472 
4473 /**
4474  *  ixgbe_calculate_checksum - Calculate checksum for buffer
4475  *  @buffer: pointer to EEPROM
4476  *  @length: size of EEPROM to calculate a checksum for
4477  *  Calculates the checksum for some buffer on a specified length.  The
4478  *  checksum calculated is returned.
4479  **/
4480 u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
4481 {
4482 	u32 i;
4483 	u8 sum = 0;
4484 
4485 	DEBUGFUNC("ixgbe_calculate_checksum");
4486 
4487 	if (!buffer)
4488 		return 0;
4489 
4490 	for (i = 0; i < length; i++)
4491 		sum += buffer[i];
4492 
4493 	return (u8) (0 - sum);
4494 }
4495 
4496 /**
4497  *  ixgbe_hic_unlocked - Issue command to manageability block unlocked
4498  *  @hw: pointer to the HW structure
4499  *  @buffer: command to write and where the return status will be placed
4500  *  @length: length of buffer, must be multiple of 4 bytes
4501  *  @timeout: time in ms to wait for command completion
4502  *
4503  *  Communicates with the manageability block. On success return IXGBE_SUCCESS
4504  *  else returns semaphore error when encountering an error acquiring
4505  *  semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
4506  *
4507  *  This function assumes that the IXGBE_GSSR_SW_MNG_SM semaphore is held
4508  *  by the caller.
4509  **/
4510 s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length,
4511 		       u32 timeout)
4512 {
4513 	u32 hicr, i, fwsts;
4514 	u16 dword_len;
4515 
4516 	DEBUGFUNC("ixgbe_hic_unlocked");
4517 
4518 	if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
4519 		DEBUGOUT1("Buffer length failure buffersize=%d.\n", length);
4520 		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
4521 	}
4522 
4523 	/* Set bit 9 of FWSTS clearing FW reset indication */
4524 	fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS);
4525 	IXGBE_WRITE_REG(hw, IXGBE_FWSTS, fwsts | IXGBE_FWSTS_FWRI);
4526 
4527 	/* Check that the host interface is enabled. */
4528 	hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
4529 	if (!(hicr & IXGBE_HICR_EN)) {
4530 		DEBUGOUT("IXGBE_HOST_EN bit disabled.\n");
4531 		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
4532 	}
4533 
4534 	/* Calculate length in DWORDs. We must be DWORD aligned */
4535 	if (length % sizeof(u32)) {
4536 		DEBUGOUT("Buffer length failure, not aligned to dword");
4537 		return IXGBE_ERR_INVALID_ARGUMENT;
4538 	}
4539 
4540 	dword_len = length >> 2;
4541 
4542 	/* The device driver writes the relevant command block
4543 	 * into the ram area.
4544 	 */
4545 	for (i = 0; i < dword_len; i++)
4546 		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
4547 				      i, IXGBE_CPU_TO_LE32(buffer[i]));
4548 
4549 	/* Setting this bit tells the ARC that a new command is pending. */
4550 	IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);
4551 
4552 	for (i = 0; i < timeout; i++) {
4553 		hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
4554 		if (!(hicr & IXGBE_HICR_C))
4555 			break;
4556 		msec_delay(1);
4557 	}
4558 
4559 	/* Check command completion */
4560 	if ((timeout && i == timeout) ||
4561 	    !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) {
4562 		ERROR_REPORT1(IXGBE_ERROR_CAUTION,
4563 			     "Command has failed with no status valid.\n");
4564 		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
4565 	}
4566 
4567 	return IXGBE_SUCCESS;
4568 }
4569 
4570 /**
4571  *  ixgbe_host_interface_command - Issue command to manageability block
4572  *  @hw: pointer to the HW structure
4573  *  @buffer: contains the command to write and where the return status will
4574  *   be placed
4575  *  @length: length of buffer, must be multiple of 4 bytes
4576  *  @timeout: time in ms to wait for command completion
4577  *  @return_data: read and return data from the buffer (TRUE) or not (FALSE)
4578  *   Needed because FW structures are big endian and decoding of
4579  *   these fields can be 8 bit or 16 bit based on command. Decoding
4580  *   is not easily understood without making a table of commands.
4581  *   So we will leave this up to the caller to read back the data
4582  *   in these cases.
4583  *
4584  *  Communicates with the manageability block. On success return IXGBE_SUCCESS
4585  *  else returns semaphore error when encountering an error acquiring
4586  *  semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
4587  **/
4588 s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
4589 				 u32 length, u32 timeout, bool return_data)
4590 {
4591 	u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
4592 	struct ixgbe_hic_hdr *resp = (struct ixgbe_hic_hdr *)buffer;
4593 	u16 buf_len;
4594 	s32 status;
4595 	u32 bi;
4596 	u32 dword_len;
4597 
4598 	DEBUGFUNC("ixgbe_host_interface_command");
4599 
4600 	if (length == 0 || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
4601 		DEBUGOUT1("Buffer length failure buffersize=%d.\n", length);
4602 		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
4603 	}
4604 
4605 	/* Take management host interface semaphore */
4606 	status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
4607 	if (status)
4608 		return status;
4609 
4610 	status = ixgbe_hic_unlocked(hw, buffer, length, timeout);
4611 	if (status)
4612 		goto rel_out;
4613 
4614 	if (!return_data)
4615 		goto rel_out;
4616 
4617 	/* Calculate length in DWORDs */
4618 	dword_len = hdr_size >> 2;
4619 
4620 	/* first pull in the header so we know the buffer length */
4621 	for (bi = 0; bi < dword_len; bi++) {
4622 		buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
4623 		IXGBE_LE32_TO_CPUS(&buffer[bi]);
4624 	}
4625 
4626 	/*
4627 	 * If there is any thing in data position pull it in
4628 	 * Read Flash command requires reading buffer length from
4629 	 * two byes instead of one byte
4630 	 */
4631 	if (resp->cmd == 0x30) {
4632 		for (; bi < dword_len + 2; bi++) {
4633 			buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG,
4634 							  bi);
4635 			IXGBE_LE32_TO_CPUS(&buffer[bi]);
4636 		}
4637 		buf_len = (((u16)(resp->cmd_or_resp.ret_status) << 3)
4638 				  & 0xF00) | resp->buf_len;
4639 		hdr_size += (2 << 2);
4640 	} else {
4641 		buf_len = resp->buf_len;
4642 	}
4643 	if (!buf_len)
4644 		goto rel_out;
4645 
4646 	if (length < buf_len + hdr_size) {
4647 		DEBUGOUT("Buffer not large enough for reply message.\n");
4648 		status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4649 		goto rel_out;
4650 	}
4651 
4652 	/* Calculate length in DWORDs, add 3 for odd lengths */
4653 	dword_len = (buf_len + 3) >> 2;
4654 
4655 	/* Pull in the rest of the buffer (bi is where we left off) */
4656 	for (; bi <= dword_len; bi++) {
4657 		buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
4658 		IXGBE_LE32_TO_CPUS(&buffer[bi]);
4659 	}
4660 
4661 rel_out:
4662 	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
4663 
4664 	return status;
4665 }
4666 
4667 /**
4668  *  ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware
4669  *  @hw: pointer to the HW structure
4670  *  @maj: driver version major number
4671  *  @min: driver version minor number
4672  *  @build: driver version build number
4673  *  @sub: driver version sub build number
4674  *  @len: unused
4675  *  @driver_ver: unused
4676  *
4677  *  Sends driver version number to firmware through the manageability
4678  *  block.  On success return IXGBE_SUCCESS
4679  *  else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring
4680  *  semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
4681  **/
4682 s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
4683 				 u8 build, u8 sub, u16 len,
4684 				 const char *driver_ver)
4685 {
4686 	struct ixgbe_hic_drv_info fw_cmd;
4687 	int i;
4688 	s32 ret_val = IXGBE_SUCCESS;
4689 
4690 	DEBUGFUNC("ixgbe_set_fw_drv_ver_generic");
4691 	UNREFERENCED_2PARAMETER(len, driver_ver);
4692 
4693 	fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
4694 	fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
4695 	fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
4696 	fw_cmd.port_num = (u8)hw->bus.func;
4697 	fw_cmd.ver_maj = maj;
4698 	fw_cmd.ver_min = min;
4699 	fw_cmd.ver_build = build;
4700 	fw_cmd.ver_sub = sub;
4701 	fw_cmd.hdr.checksum = 0;
4702 	fw_cmd.pad = 0;
4703 	fw_cmd.pad2 = 0;
4704 	fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
4705 				(FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
4706 
4707 	for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
4708 		ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
4709 						       sizeof(fw_cmd),
4710 						       IXGBE_HI_COMMAND_TIMEOUT,
4711 						       TRUE);
4712 		if (ret_val != IXGBE_SUCCESS)
4713 			continue;
4714 
4715 		if (fw_cmd.hdr.cmd_or_resp.ret_status ==
4716 		    FW_CEM_RESP_STATUS_SUCCESS)
4717 			ret_val = IXGBE_SUCCESS;
4718 		else
4719 			ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4720 
4721 		break;
4722 	}
4723 
4724 	return ret_val;
4725 }
4726 
4727 /**
4728  * ixgbe_set_rxpba_generic - Initialize Rx packet buffer
4729  * @hw: pointer to hardware structure
4730  * @num_pb: number of packet buffers to allocate
4731  * @headroom: reserve n KB of headroom
4732  * @strategy: packet buffer allocation strategy
4733  **/
4734 void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
4735 			     int strategy)
4736 {
4737 	u32 pbsize = hw->mac.rx_pb_size;
4738 	int i = 0;
4739 	u32 rxpktsize, txpktsize, txpbthresh;
4740 
4741 	/* Reserve headroom */
4742 	pbsize -= headroom;
4743 
4744 	if (!num_pb)
4745 		num_pb = 1;
4746 
4747 	/* Divide remaining packet buffer space amongst the number of packet
4748 	 * buffers requested using supplied strategy.
4749 	 */
4750 	switch (strategy) {
4751 	case PBA_STRATEGY_WEIGHTED:
4752 		/* ixgbe_dcb_pba_80_48 strategy weight first half of packet
4753 		 * buffer with 5/8 of the packet buffer space.
4754 		 */
4755 		rxpktsize = (pbsize * 5) / (num_pb * 4);
4756 		pbsize -= rxpktsize * (num_pb / 2);
4757 		rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
4758 		for (; i < (num_pb / 2); i++)
4759 			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
4760 		/* configure remaining packet buffers */
4761 		/* FALLTHROUGH */
4762 	case PBA_STRATEGY_EQUAL:
4763 		rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
4764 		for (; i < num_pb; i++)
4765 			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
4766 		break;
4767 	default:
4768 		break;
4769 	}
4770 
4771 	/* Only support an equally distributed Tx packet buffer strategy. */
4772 	txpktsize = IXGBE_TXPBSIZE_MAX / num_pb;
4773 	txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
4774 	for (i = 0; i < num_pb; i++) {
4775 		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
4776 		IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
4777 	}
4778 
4779 	/* Clear unused TCs, if any, to zero buffer size*/
4780 	for (; i < IXGBE_MAX_PB; i++) {
4781 		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
4782 		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
4783 		IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
4784 	}
4785 }
4786 
4787 /**
4788  * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo
4789  * @hw: pointer to the hardware structure
4790  *
4791  * The 82599 and x540 MACs can experience issues if TX work is still pending
4792  * when a reset occurs.  This function prevents this by flushing the PCIe
4793  * buffers on the system.
4794  **/
4795 void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
4796 {
4797 	u32 gcr_ext, hlreg0, i, poll;
4798 	u16 value;
4799 
4800 	/*
4801 	 * If double reset is not requested then all transactions should
4802 	 * already be clear and as such there is no work to do
4803 	 */
4804 	if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED))
4805 		return;
4806 
4807 	/*
4808 	 * Set loopback enable to prevent any transmits from being sent
4809 	 * should the link come up.  This assumes that the RXCTRL.RXEN bit
4810 	 * has already been cleared.
4811 	 */
4812 	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4813 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);
4814 
4815 	/* Wait for a last completion before clearing buffers */
4816 	IXGBE_WRITE_FLUSH(hw);
4817 	msec_delay(3);
4818 
4819 	/*
4820 	 * Before proceeding, make sure that the PCIe block does not have
4821 	 * transactions pending.
4822 	 */
4823 	poll = ixgbe_pcie_timeout_poll(hw);
4824 	for (i = 0; i < poll; i++) {
4825 		usec_delay(100);
4826 		value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS);
4827 		if (IXGBE_REMOVED(hw->hw_addr))
4828 			goto out;
4829 		if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
4830 			goto out;
4831 	}
4832 
4833 out:
4834 	/* initiate cleaning flow for buffers in the PCIe transaction layer */
4835 	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
4836 	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
4837 			gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR);
4838 
4839 	/* Flush all writes and allow 20usec for all transactions to clear */
4840 	IXGBE_WRITE_FLUSH(hw);
4841 	usec_delay(20);
4842 
4843 	/* restore previous register values */
4844 	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
4845 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
4846 }
4847 
4848 /**
4849  *  ixgbe_bypass_rw_generic - Bit bang data into by_pass FW
4850  *
4851  *  @hw: pointer to hardware structure
4852  *  @cmd: Command we send to the FW
4853  *  @status: The reply from the FW
4854  *
4855  *  Bit-bangs the cmd to the by_pass FW status points to what is returned.
4856  **/
4857 #define IXGBE_BYPASS_BB_WAIT 1
4858 s32 ixgbe_bypass_rw_generic(struct ixgbe_hw *hw, u32 cmd, u32 *status)
4859 {
4860 	int i;
4861 	u32 sck, sdi, sdo, dir_sck, dir_sdi, dir_sdo;
4862 	u32 esdp;
4863 
4864 	if (!status)
4865 		return IXGBE_ERR_PARAM;
4866 
4867 	*status = 0;
4868 
4869 	/* SDP vary by MAC type */
4870 	switch (hw->mac.type) {
4871 	case ixgbe_mac_82599EB:
4872 		sck = IXGBE_ESDP_SDP7;
4873 		sdi = IXGBE_ESDP_SDP0;
4874 		sdo = IXGBE_ESDP_SDP6;
4875 		dir_sck = IXGBE_ESDP_SDP7_DIR;
4876 		dir_sdi = IXGBE_ESDP_SDP0_DIR;
4877 		dir_sdo = IXGBE_ESDP_SDP6_DIR;
4878 		break;
4879 	case ixgbe_mac_X540:
4880 		sck = IXGBE_ESDP_SDP2;
4881 		sdi = IXGBE_ESDP_SDP0;
4882 		sdo = IXGBE_ESDP_SDP1;
4883 		dir_sck = IXGBE_ESDP_SDP2_DIR;
4884 		dir_sdi = IXGBE_ESDP_SDP0_DIR;
4885 		dir_sdo = IXGBE_ESDP_SDP1_DIR;
4886 		break;
4887 	default:
4888 		return IXGBE_ERR_DEVICE_NOT_SUPPORTED;
4889 	}
4890 
4891 	/* Set SDP pins direction */
4892 	esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
4893 	esdp |= dir_sck;	/* SCK as output */
4894 	esdp |= dir_sdi;	/* SDI as output */
4895 	esdp &= ~dir_sdo;	/* SDO as input */
4896 	esdp |= sck;
4897 	esdp |= sdi;
4898 	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
4899 	IXGBE_WRITE_FLUSH(hw);
4900 	msec_delay(IXGBE_BYPASS_BB_WAIT);
4901 
4902 	/* Generate start condition */
4903 	esdp &= ~sdi;
4904 	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
4905 	IXGBE_WRITE_FLUSH(hw);
4906 	msec_delay(IXGBE_BYPASS_BB_WAIT);
4907 
4908 	esdp &= ~sck;
4909 	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
4910 	IXGBE_WRITE_FLUSH(hw);
4911 	msec_delay(IXGBE_BYPASS_BB_WAIT);
4912 
4913 	/* Clock out the new control word and clock in the status */
4914 	for (i = 0; i < 32; i++) {
4915 		if ((cmd >> (31 - i)) & 0x01) {
4916 			esdp |= sdi;
4917 			IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
4918 		} else {
4919 			esdp &= ~sdi;
4920 			IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
4921 		}
4922 		IXGBE_WRITE_FLUSH(hw);
4923 		msec_delay(IXGBE_BYPASS_BB_WAIT);
4924 
4925 		esdp |= sck;
4926 		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
4927 		IXGBE_WRITE_FLUSH(hw);
4928 		msec_delay(IXGBE_BYPASS_BB_WAIT);
4929 
4930 		esdp &= ~sck;
4931 		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
4932 		IXGBE_WRITE_FLUSH(hw);
4933 		msec_delay(IXGBE_BYPASS_BB_WAIT);
4934 
4935 		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
4936 		if (esdp & sdo)
4937 			*status = (*status << 1) | 0x01;
4938 		else
4939 			*status = (*status << 1) | 0x00;
4940 		msec_delay(IXGBE_BYPASS_BB_WAIT);
4941 	}
4942 
4943 	/* stop condition */
4944 	esdp |= sck;
4945 	esdp &= ~sdi;
4946 	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
4947 	IXGBE_WRITE_FLUSH(hw);
4948 	msec_delay(IXGBE_BYPASS_BB_WAIT);
4949 
4950 	esdp |= sdi;
4951 	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
4952 	IXGBE_WRITE_FLUSH(hw);
4953 
4954 	/* set the page bits to match the cmd that the status it belongs to */
4955 	*status = (*status & 0x3fffffff) | (cmd & 0xc0000000);
4956 
4957 	return IXGBE_SUCCESS;
4958 }
4959 
4960 /**
4961  * ixgbe_bypass_valid_rd_generic - Verify valid return from bit-bang.
4962  *
4963  * If we send a write we can't be sure it took until we can read back
4964  * that same register.  It can be a problem as some of the feilds may
4965  * for valid reasons change inbetween the time wrote the register and
4966  * we read it again to verify.  So this function check everything we
4967  * can check and then assumes it worked.
4968  *
4969  * @u32 in_reg - The register cmd for the bit-bang read.
4970  * @u32 out_reg - The register returned from a bit-bang read.
4971  **/
4972 bool ixgbe_bypass_valid_rd_generic(u32 in_reg, u32 out_reg)
4973 {
4974 	u32 mask;
4975 
4976 	/* Page must match for all control pages */
4977 	if ((in_reg & BYPASS_PAGE_M) != (out_reg & BYPASS_PAGE_M))
4978 		return FALSE;
4979 
4980 	switch (in_reg & BYPASS_PAGE_M) {
4981 	case BYPASS_PAGE_CTL0:
4982 		/* All the following can't change since the last write
4983 		 *  - All the event actions
4984 		 *  - The timeout value
4985 		 */
4986 		mask = BYPASS_AUX_ON_M | BYPASS_MAIN_ON_M |
4987 		       BYPASS_MAIN_OFF_M | BYPASS_AUX_OFF_M |
4988 		       BYPASS_WDTIMEOUT_M |
4989 		       BYPASS_WDT_VALUE_M;
4990 		if ((out_reg & mask) != (in_reg & mask))
4991 			return FALSE;
4992 
4993 		/* 0x0 is never a valid value for bypass status */
4994 		if (!(out_reg & BYPASS_STATUS_OFF_M))
4995 			return FALSE;
4996 		break;
4997 	case BYPASS_PAGE_CTL1:
4998 		/* All the following can't change since the last write
4999 		 *  - time valid bit
5000 		 *  - time we last sent
5001 		 */
5002 		mask = BYPASS_CTL1_VALID_M | BYPASS_CTL1_TIME_M;
5003 		if ((out_reg & mask) != (in_reg & mask))
5004 			return FALSE;
5005 		break;
5006 	case BYPASS_PAGE_CTL2:
5007 		/* All we can check in this page is control number
5008 		 * which is already done above.
5009 		 */
5010 		break;
5011 	}
5012 
5013 	/* We are as sure as we can be return TRUE */
5014 	return TRUE;
5015 }
5016 
5017 /**
5018  *  ixgbe_bypass_set_generic - Set a bypass field in the FW CTRL Regiter.
5019  *
5020  *  @hw: pointer to hardware structure
5021  *  @cmd: The control word we are setting.
5022  *  @event: The event we are setting in the FW.  This also happens to
5023  *	    be the mask for the event we are setting (handy)
5024  *  @action: The action we set the event to in the FW. This is in a
5025  *	     bit field that happens to be what we want to put in
5026  *	     the event spot (also handy)
5027  **/
5028 s32 ixgbe_bypass_set_generic(struct ixgbe_hw *hw, u32 ctrl, u32 event,
5029 			     u32 action)
5030 {
5031 	u32 by_ctl = 0;
5032 	u32 cmd, verify;
5033 	u32 count = 0;
5034 
5035 	/* Get current values */
5036 	cmd = ctrl;	/* just reading only need control number */
5037 	if (ixgbe_bypass_rw_generic(hw, cmd, &by_ctl))
5038 		return IXGBE_ERR_INVALID_ARGUMENT;
5039 
5040 	/* Set to new action */
5041 	cmd = (by_ctl & ~event) | BYPASS_WE | action;
5042 	if (ixgbe_bypass_rw_generic(hw, cmd, &by_ctl))
5043 		return IXGBE_ERR_INVALID_ARGUMENT;
5044 
5045 	/* Page 0 force a FW eeprom write which is slow so verify */
5046 	if ((cmd & BYPASS_PAGE_M) == BYPASS_PAGE_CTL0) {
5047 		verify = BYPASS_PAGE_CTL0;
5048 		do {
5049 			if (count++ > 5)
5050 				return IXGBE_BYPASS_FW_WRITE_FAILURE;
5051 
5052 			if (ixgbe_bypass_rw_generic(hw, verify, &by_ctl))
5053 				return IXGBE_ERR_INVALID_ARGUMENT;
5054 		} while (!ixgbe_bypass_valid_rd_generic(cmd, by_ctl));
5055 	} else {
5056 		/* We have give the FW time for the write to stick */
5057 		msec_delay(100);
5058 	}
5059 
5060 	return IXGBE_SUCCESS;
5061 }
5062 
5063 /**
5064  *  ixgbe_bypass_rd_eep_generic - Read the bypass FW eeprom addres.
5065  *
5066  *  @hw: pointer to hardware structure
5067  *  @addr: The bypass eeprom address to read.
5068  *  @value: The 8b of data at the address above.
5069  **/
5070 s32 ixgbe_bypass_rd_eep_generic(struct ixgbe_hw *hw, u32 addr, u8 *value)
5071 {
5072 	u32 cmd;
5073 	u32 status;
5074 
5075 
5076 	/* send the request */
5077 	cmd = BYPASS_PAGE_CTL2 | BYPASS_WE;
5078 	cmd |= (addr << BYPASS_CTL2_OFFSET_SHIFT) & BYPASS_CTL2_OFFSET_M;
5079 	if (ixgbe_bypass_rw_generic(hw, cmd, &status))
5080 		return IXGBE_ERR_INVALID_ARGUMENT;
5081 
5082 	/* We have give the FW time for the write to stick */
5083 	msec_delay(100);
5084 
5085 	/* now read the results */
5086 	cmd &= ~BYPASS_WE;
5087 	if (ixgbe_bypass_rw_generic(hw, cmd, &status))
5088 		return IXGBE_ERR_INVALID_ARGUMENT;
5089 
5090 	*value = status & BYPASS_CTL2_DATA_M;
5091 
5092 	return IXGBE_SUCCESS;
5093 }
5094 
5095 /**
5096  *  ixgbe_get_orom_version - Return option ROM from EEPROM
5097  *
5098  *  @hw: pointer to hardware structure
5099  *  @nvm_ver: pointer to output structure
5100  *
5101  *  if valid option ROM version, nvm_ver->or_valid set to TRUE
5102  *  else nvm_ver->or_valid is FALSE.
5103  **/
5104 void ixgbe_get_orom_version(struct ixgbe_hw *hw,
5105 			    struct ixgbe_nvm_version *nvm_ver)
5106 {
5107 	u16 offset, eeprom_cfg_blkh, eeprom_cfg_blkl;
5108 
5109 	nvm_ver->or_valid = FALSE;
5110 	/* Option Rom may or may not be present.  Start with pointer */
5111 	hw->eeprom.ops.read(hw, NVM_OROM_OFFSET, &offset);
5112 
5113 	/* make sure offset is valid */
5114 	if ((offset == 0x0) || (offset == NVM_INVALID_PTR))
5115 		return;
5116 
5117 	hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_HI, &eeprom_cfg_blkh);
5118 	hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_LOW, &eeprom_cfg_blkl);
5119 
5120 	/* option rom exists and is valid */
5121 	if ((eeprom_cfg_blkl | eeprom_cfg_blkh) == 0x0 ||
5122 	    eeprom_cfg_blkl == NVM_VER_INVALID ||
5123 	    eeprom_cfg_blkh == NVM_VER_INVALID)
5124 		return;
5125 
5126 	nvm_ver->or_valid = TRUE;
5127 	nvm_ver->or_major = eeprom_cfg_blkl >> NVM_OROM_SHIFT;
5128 	nvm_ver->or_build = (eeprom_cfg_blkl << NVM_OROM_SHIFT) |
5129 			    (eeprom_cfg_blkh >> NVM_OROM_SHIFT);
5130 	nvm_ver->or_patch = eeprom_cfg_blkh & NVM_OROM_PATCH_MASK;
5131 }
5132 
5133 /**
5134  *  ixgbe_get_oem_prod_version - Return OEM Product version
5135  *
5136  *  @hw: pointer to hardware structure
5137  *  @nvm_ver: pointer to output structure
5138  *
5139  *  if valid OEM product version, nvm_ver->oem_valid set to TRUE
5140  *  else nvm_ver->oem_valid is FALSE.
5141  **/
5142 void ixgbe_get_oem_prod_version(struct ixgbe_hw *hw,
5143 				struct ixgbe_nvm_version *nvm_ver)
5144 {
5145 	u16 rel_num, prod_ver, mod_len, cap, offset;
5146 
5147 	nvm_ver->oem_valid = FALSE;
5148 	hw->eeprom.ops.read(hw, NVM_OEM_PROD_VER_PTR, &offset);
5149 
5150 	/* Return is offset to OEM Product Version block is invalid */
5151 	if (offset == 0x0 && offset == NVM_INVALID_PTR)
5152 		return;
5153 
5154 	/* Read product version block */
5155 	hw->eeprom.ops.read(hw, offset, &mod_len);
5156 	hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_CAP_OFF, &cap);
5157 
5158 	/* Return if OEM product version block is invalid */
5159 	if (mod_len != NVM_OEM_PROD_VER_MOD_LEN ||
5160 	    (cap & NVM_OEM_PROD_VER_CAP_MASK) != 0x0)
5161 		return;
5162 
5163 	hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_L, &prod_ver);
5164 	hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_H, &rel_num);
5165 
5166 	/* Return if version is invalid */
5167 	if ((rel_num | prod_ver) == 0x0 ||
5168 	    rel_num == NVM_VER_INVALID || prod_ver == NVM_VER_INVALID)
5169 		return;
5170 
5171 	nvm_ver->oem_major = prod_ver >> NVM_VER_SHIFT;
5172 	nvm_ver->oem_minor = prod_ver & NVM_VER_MASK;
5173 	nvm_ver->oem_release = rel_num;
5174 	nvm_ver->oem_valid = TRUE;
5175 }
5176 
5177 /**
5178  *  ixgbe_get_etk_id - Return Etrack ID from EEPROM
5179  *
5180  *  @hw: pointer to hardware structure
5181  *  @nvm_ver: pointer to output structure
5182  *
5183  *  word read errors will return 0xFFFF
5184  **/
5185 void ixgbe_get_etk_id(struct ixgbe_hw *hw, struct ixgbe_nvm_version *nvm_ver)
5186 {
5187 	u16 etk_id_l, etk_id_h;
5188 
5189 	if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_LOW, &etk_id_l))
5190 		etk_id_l = NVM_VER_INVALID;
5191 	if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_HI, &etk_id_h))
5192 		etk_id_h = NVM_VER_INVALID;
5193 
5194 	/* The word order for the version format is determined by high order
5195 	 * word bit 15.
5196 	 */
5197 	if ((etk_id_h & NVM_ETK_VALID) == 0) {
5198 		nvm_ver->etk_id = etk_id_h;
5199 		nvm_ver->etk_id |= (etk_id_l << NVM_ETK_SHIFT);
5200 	} else {
5201 		nvm_ver->etk_id = etk_id_l;
5202 		nvm_ver->etk_id |= (etk_id_h << NVM_ETK_SHIFT);
5203 	}
5204 }
5205 
5206 
5207 /**
5208  * ixgbe_dcb_get_rtrup2tc_generic - read rtrup2tc reg
5209  * @hw: pointer to hardware structure
5210  * @map: pointer to u8 arr for returning map
5211  *
5212  * Read the rtrup2tc HW register and resolve its content into map
5213  **/
5214 void ixgbe_dcb_get_rtrup2tc_generic(struct ixgbe_hw *hw, u8 *map)
5215 {
5216 	u32 reg, i;
5217 
5218 	reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
5219 	for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
5220 		map[i] = IXGBE_RTRUP2TC_UP_MASK &
5221 			(reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT));
5222 	return;
5223 }
5224 
5225 void ixgbe_disable_rx_generic(struct ixgbe_hw *hw)
5226 {
5227 	u32 pfdtxgswc;
5228 	u32 rxctrl;
5229 
5230 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
5231 	if (rxctrl & IXGBE_RXCTRL_RXEN) {
5232 		if (hw->mac.type != ixgbe_mac_82598EB) {
5233 			pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
5234 			if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
5235 				pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
5236 				IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
5237 				hw->mac.set_lben = TRUE;
5238 			} else {
5239 				hw->mac.set_lben = FALSE;
5240 			}
5241 		}
5242 		rxctrl &= ~IXGBE_RXCTRL_RXEN;
5243 		IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
5244 	}
5245 }
5246 
5247 void ixgbe_enable_rx_generic(struct ixgbe_hw *hw)
5248 {
5249 	u32 pfdtxgswc;
5250 	u32 rxctrl;
5251 
5252 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
5253 	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, (rxctrl | IXGBE_RXCTRL_RXEN));
5254 
5255 	if (hw->mac.type != ixgbe_mac_82598EB) {
5256 		if (hw->mac.set_lben) {
5257 			pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
5258 			pfdtxgswc |= IXGBE_PFDTXGSWC_VT_LBEN;
5259 			IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
5260 			hw->mac.set_lben = FALSE;
5261 		}
5262 	}
5263 }
5264 
5265 /**
5266  * ixgbe_mng_present - returns TRUE when management capability is present
5267  * @hw: pointer to hardware structure
5268  */
5269 bool ixgbe_mng_present(struct ixgbe_hw *hw)
5270 {
5271 	u32 fwsm;
5272 
5273 	if (hw->mac.type < ixgbe_mac_82599EB)
5274 		return FALSE;
5275 
5276 	fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw));
5277 
5278 	return !!(fwsm & IXGBE_FWSM_FW_MODE_PT);
5279 }
5280 
5281 /**
5282  * ixgbe_mng_enabled - Is the manageability engine enabled?
5283  * @hw: pointer to hardware structure
5284  *
5285  * Returns TRUE if the manageability engine is enabled.
5286  **/
5287 bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
5288 {
5289 	u32 fwsm, manc, factps;
5290 
5291 	fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw));
5292 	if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT)
5293 		return FALSE;
5294 
5295 	manc = IXGBE_READ_REG(hw, IXGBE_MANC);
5296 	if (!(manc & IXGBE_MANC_RCV_TCO_EN))
5297 		return FALSE;
5298 
5299 	if (hw->mac.type <= ixgbe_mac_X540) {
5300 		factps = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw));
5301 		if (factps & IXGBE_FACTPS_MNGCG)
5302 			return FALSE;
5303 	}
5304 
5305 	return TRUE;
5306 }
5307 
5308 /**
5309  *  ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
5310  *  @hw: pointer to hardware structure
5311  *  @speed: new link speed
5312  *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
5313  *
5314  *  Set the link speed in the MAC and/or PHY register and restarts link.
5315  **/
5316 s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
5317 					  ixgbe_link_speed speed,
5318 					  bool autoneg_wait_to_complete)
5319 {
5320 	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
5321 	ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
5322 	s32 status = IXGBE_SUCCESS;
5323 	u32 speedcnt = 0;
5324 	u32 i = 0;
5325 	bool autoneg, link_up = FALSE;
5326 
5327 	DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");
5328 
5329 	/* Mask off requested but non-supported speeds */
5330 	status = ixgbe_get_link_capabilities(hw, &link_speed, &autoneg);
5331 	if (status != IXGBE_SUCCESS)
5332 		return status;
5333 
5334 	speed &= link_speed;
5335 
5336 	/* Try each speed one by one, highest priority first.  We do this in
5337 	 * software because 10Gb fiber doesn't support speed autonegotiation.
5338 	 */
5339 	if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
5340 		speedcnt++;
5341 		highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
5342 
5343 		/* Set the module link speed */
5344 		switch (hw->phy.media_type) {
5345 		case ixgbe_media_type_fiber_fixed:
5346 		case ixgbe_media_type_fiber:
5347 			ixgbe_set_rate_select_speed(hw,
5348 						    IXGBE_LINK_SPEED_10GB_FULL);
5349 			break;
5350 		case ixgbe_media_type_fiber_qsfp:
5351 			/* QSFP module automatically detects MAC link speed */
5352 			break;
5353 		default:
5354 			DEBUGOUT("Unexpected media type.\n");
5355 			break;
5356 		}
5357 
5358 		/* Allow module to change analog characteristics (1G->10G) */
5359 		msec_delay(40);
5360 
5361 		status = ixgbe_setup_mac_link(hw,
5362 					      IXGBE_LINK_SPEED_10GB_FULL,
5363 					      autoneg_wait_to_complete);
5364 		if (status != IXGBE_SUCCESS)
5365 			return status;
5366 
5367 		/* Flap the Tx laser if it has not already been done */
5368 		ixgbe_flap_tx_laser(hw);
5369 
5370 		/* Wait for the controller to acquire link.  Per IEEE 802.3ap,
5371 		 * Section 73.10.2, we may have to wait up to 500ms if KR is
5372 		 * attempted.  82599 uses the same timing for 10g SFI.
5373 		 */
5374 		for (i = 0; i < 5; i++) {
5375 			/* Wait for the link partner to also set speed */
5376 			msec_delay(100);
5377 
5378 			/* If we have link, just jump out */
5379 			status = ixgbe_check_link(hw, &link_speed,
5380 						  &link_up, FALSE);
5381 			if (status != IXGBE_SUCCESS)
5382 				return status;
5383 
5384 			if (link_up)
5385 				goto out;
5386 		}
5387 	}
5388 
5389 	if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
5390 		speedcnt++;
5391 		if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
5392 			highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
5393 
5394 		/* Set the module link speed */
5395 		switch (hw->phy.media_type) {
5396 		case ixgbe_media_type_fiber_fixed:
5397 		case ixgbe_media_type_fiber:
5398 			ixgbe_set_rate_select_speed(hw,
5399 						    IXGBE_LINK_SPEED_1GB_FULL);
5400 			break;
5401 		case ixgbe_media_type_fiber_qsfp:
5402 			/* QSFP module automatically detects link speed */
5403 			break;
5404 		default:
5405 			DEBUGOUT("Unexpected media type.\n");
5406 			break;
5407 		}
5408 
5409 		/* Allow module to change analog characteristics (10G->1G) */
5410 		msec_delay(40);
5411 
5412 		status = ixgbe_setup_mac_link(hw,
5413 					      IXGBE_LINK_SPEED_1GB_FULL,
5414 					      autoneg_wait_to_complete);
5415 		if (status != IXGBE_SUCCESS)
5416 			return status;
5417 
5418 		/* Flap the Tx laser if it has not already been done */
5419 		ixgbe_flap_tx_laser(hw);
5420 
5421 		/* Wait for the link partner to also set speed */
5422 		msec_delay(100);
5423 
5424 		/* If we have link, just jump out */
5425 		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
5426 		if (status != IXGBE_SUCCESS)
5427 			return status;
5428 
5429 		if (link_up)
5430 			goto out;
5431 	}
5432 
5433 	/* We didn't get link.  Configure back to the highest speed we tried,
5434 	 * (if there was more than one).  We call ourselves back with just the
5435 	 * single highest speed that the user requested.
5436 	 */
5437 	if (speedcnt > 1)
5438 		status = ixgbe_setup_mac_link_multispeed_fiber(hw,
5439 						      highest_link_speed,
5440 						      autoneg_wait_to_complete);
5441 
5442 out:
5443 	/* Set autoneg_advertised value based on input link speed */
5444 	hw->phy.autoneg_advertised = 0;
5445 
5446 	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
5447 		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
5448 
5449 	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
5450 		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
5451 
5452 	return status;
5453 }
5454 
5455 /**
5456  *  ixgbe_set_soft_rate_select_speed - Set module link speed
5457  *  @hw: pointer to hardware structure
5458  *  @speed: link speed to set
5459  *
5460  *  Set module link speed via the soft rate select.
5461  */
5462 void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw,
5463 					ixgbe_link_speed speed)
5464 {
5465 	s32 status;
5466 	u8 rs, eeprom_data;
5467 
5468 	switch (speed) {
5469 	case IXGBE_LINK_SPEED_10GB_FULL:
5470 		/* one bit mask same as setting on */
5471 		rs = IXGBE_SFF_SOFT_RS_SELECT_10G;
5472 		break;
5473 	case IXGBE_LINK_SPEED_1GB_FULL:
5474 		rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
5475 		break;
5476 	default:
5477 		DEBUGOUT("Invalid fixed module speed\n");
5478 		return;
5479 	}
5480 
5481 	/* Set RS0 */
5482 	status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
5483 					   IXGBE_I2C_EEPROM_DEV_ADDR2,
5484 					   &eeprom_data);
5485 	if (status) {
5486 		DEBUGOUT("Failed to read Rx Rate Select RS0\n");
5487 		goto out;
5488 	}
5489 
5490 	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
5491 
5492 	status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
5493 					    IXGBE_I2C_EEPROM_DEV_ADDR2,
5494 					    eeprom_data);
5495 	if (status) {
5496 		DEBUGOUT("Failed to write Rx Rate Select RS0\n");
5497 		goto out;
5498 	}
5499 
5500 	/* Set RS1 */
5501 	status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
5502 					   IXGBE_I2C_EEPROM_DEV_ADDR2,
5503 					   &eeprom_data);
5504 	if (status) {
5505 		DEBUGOUT("Failed to read Rx Rate Select RS1\n");
5506 		goto out;
5507 	}
5508 
5509 	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
5510 
5511 	status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
5512 					    IXGBE_I2C_EEPROM_DEV_ADDR2,
5513 					    eeprom_data);
5514 	if (status) {
5515 		DEBUGOUT("Failed to write Rx Rate Select RS1\n");
5516 		goto out;
5517 	}
5518 out:
5519 	return;
5520 }
5521