xref: /freebsd/sys/dev/ixgbe/ixgbe_common.c (revision 5bf5ca772c6de2d53344a78cf461447cc322ccea)
1 /******************************************************************************
2   SPDX-License-Identifier: BSD-3-Clause
3 
4   Copyright (c) 2001-2017, Intel Corporation
5   All rights reserved.
6 
7   Redistribution and use in source and binary forms, with or without
8   modification, are permitted provided that the following conditions are met:
9 
10    1. Redistributions of source code must retain the above copyright notice,
11       this list of conditions and the following disclaimer.
12 
13    2. Redistributions in binary form must reproduce the above copyright
14       notice, this list of conditions and the following disclaimer in the
15       documentation and/or other materials provided with the distribution.
16 
17    3. Neither the name of the Intel Corporation nor the names of its
18       contributors may be used to endorse or promote products derived from
19       this software without specific prior written permission.
20 
21   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31   POSSIBILITY OF SUCH DAMAGE.
32 
33 ******************************************************************************/
34 /*$FreeBSD$*/
35 
36 #include "ixgbe_common.h"
37 #include "ixgbe_phy.h"
38 #include "ixgbe_dcb.h"
39 #include "ixgbe_dcb_82599.h"
40 #include "ixgbe_api.h"
41 
42 static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
43 static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
44 static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
45 static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
46 static void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
47 static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
48 					u16 count);
49 static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
50 static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
51 static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
52 static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
53 
54 static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
55 static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
56 					 u16 *san_mac_offset);
57 static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
58 					     u16 words, u16 *data);
59 static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
60 					      u16 words, u16 *data);
61 static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
62 						 u16 offset);
63 
/**
 *  ixgbe_init_ops_generic - Inits function ptrs
 *  @hw: pointer to the hardware structure
 *
 *  Initialize the function pointers to the generic (MAC-independent)
 *  implementations.  Entries set to NULL here are expected to be filled
 *  in later by the MAC-specific init routine for the detected part.
 *
 *  Always returns IXGBE_SUCCESS.
 **/
s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
{
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	struct ixgbe_mac_info *mac = &hw->mac;
	u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));

	DEBUGFUNC("ixgbe_init_ops_generic");

	/* EEPROM */
	eeprom->ops.init_params = ixgbe_init_eeprom_params_generic;
	/* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */
	if (eec & IXGBE_EEC_PRES) {
		eeprom->ops.read = ixgbe_read_eerd_generic;
		eeprom->ops.read_buffer = ixgbe_read_eerd_buffer_generic;
	} else {
		eeprom->ops.read = ixgbe_read_eeprom_bit_bang_generic;
		eeprom->ops.read_buffer =
				 ixgbe_read_eeprom_buffer_bit_bang_generic;
	}
	/* Writes always use the generic/bit-bang paths regardless of EEC */
	eeprom->ops.write = ixgbe_write_eeprom_generic;
	eeprom->ops.write_buffer = ixgbe_write_eeprom_buffer_bit_bang_generic;
	eeprom->ops.validate_checksum =
				      ixgbe_validate_eeprom_checksum_generic;
	eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_generic;
	eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_generic;

	/* MAC */
	mac->ops.init_hw = ixgbe_init_hw_generic;
	mac->ops.reset_hw = NULL;		/* MAC-specific */
	mac->ops.start_hw = ixgbe_start_hw_generic;
	mac->ops.clear_hw_cntrs = ixgbe_clear_hw_cntrs_generic;
	mac->ops.get_media_type = NULL;		/* MAC-specific */
	mac->ops.get_supported_physical_layer = NULL;	/* MAC-specific */
	mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_generic;
	mac->ops.get_mac_addr = ixgbe_get_mac_addr_generic;
	mac->ops.stop_adapter = ixgbe_stop_adapter_generic;
	mac->ops.get_bus_info = ixgbe_get_bus_info_generic;
	mac->ops.set_lan_id = ixgbe_set_lan_id_multi_port_pcie;
	mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync;
	mac->ops.release_swfw_sync = ixgbe_release_swfw_sync;
	mac->ops.prot_autoc_read = prot_autoc_read_generic;
	mac->ops.prot_autoc_write = prot_autoc_write_generic;

	/* LEDs */
	mac->ops.led_on = ixgbe_led_on_generic;
	mac->ops.led_off = ixgbe_led_off_generic;
	mac->ops.blink_led_start = ixgbe_blink_led_start_generic;
	mac->ops.blink_led_stop = ixgbe_blink_led_stop_generic;
	mac->ops.init_led_link_act = ixgbe_init_led_link_act_generic;

	/* RAR, Multicast, VLAN */
	mac->ops.set_rar = ixgbe_set_rar_generic;
	mac->ops.clear_rar = ixgbe_clear_rar_generic;
	mac->ops.insert_mac_addr = NULL;	/* MAC-specific */
	mac->ops.set_vmdq = NULL;		/* MAC-specific */
	mac->ops.clear_vmdq = NULL;		/* MAC-specific */
	mac->ops.init_rx_addrs = ixgbe_init_rx_addrs_generic;
	mac->ops.update_uc_addr_list = ixgbe_update_uc_addr_list_generic;
	mac->ops.update_mc_addr_list = ixgbe_update_mc_addr_list_generic;
	mac->ops.enable_mc = ixgbe_enable_mc_generic;
	mac->ops.disable_mc = ixgbe_disable_mc_generic;
	mac->ops.clear_vfta = NULL;		/* MAC-specific */
	mac->ops.set_vfta = NULL;		/* MAC-specific */
	mac->ops.set_vlvf = NULL;		/* MAC-specific */
	mac->ops.init_uta_tables = NULL;	/* MAC-specific */
	mac->ops.enable_rx = ixgbe_enable_rx_generic;
	mac->ops.disable_rx = ixgbe_disable_rx_generic;

	/* Flow Control */
	mac->ops.fc_enable = ixgbe_fc_enable_generic;
	mac->ops.setup_fc = ixgbe_setup_fc_generic;
	mac->ops.fc_autoneg = ixgbe_fc_autoneg;

	/* Link */
	mac->ops.get_link_capabilities = NULL;	/* MAC-specific */
	mac->ops.setup_link = NULL;		/* MAC-specific */
	mac->ops.check_link = NULL;		/* MAC-specific */
	mac->ops.dmac_config = NULL;		/* MAC-specific */
	mac->ops.dmac_update_tcs = NULL;	/* MAC-specific */
	mac->ops.dmac_config_tcs = NULL;	/* MAC-specific */

	return IXGBE_SUCCESS;
}
153 
154 /**
155  * ixgbe_device_supports_autoneg_fc - Check if device supports autonegotiation
156  * of flow control
157  * @hw: pointer to hardware structure
158  *
159  * This function returns TRUE if the device supports flow control
160  * autonegotiation, and FALSE if it does not.
161  *
162  **/
163 bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
164 {
165 	bool supported = FALSE;
166 	ixgbe_link_speed speed;
167 	bool link_up;
168 
169 	DEBUGFUNC("ixgbe_device_supports_autoneg_fc");
170 
171 	switch (hw->phy.media_type) {
172 	case ixgbe_media_type_fiber_fixed:
173 	case ixgbe_media_type_fiber_qsfp:
174 	case ixgbe_media_type_fiber:
175 		/* flow control autoneg black list */
176 		switch (hw->device_id) {
177 		case IXGBE_DEV_ID_X550EM_A_SFP:
178 		case IXGBE_DEV_ID_X550EM_A_SFP_N:
179 		case IXGBE_DEV_ID_X550EM_A_QSFP:
180 		case IXGBE_DEV_ID_X550EM_A_QSFP_N:
181 			supported = FALSE;
182 			break;
183 		default:
184 			hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
185 			/* if link is down, assume supported */
186 			if (link_up)
187 				supported = speed == IXGBE_LINK_SPEED_1GB_FULL ?
188 				TRUE : FALSE;
189 			else
190 				supported = TRUE;
191 		}
192 
193 		break;
194 	case ixgbe_media_type_backplane:
195 		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_XFI)
196 			supported = FALSE;
197 		else
198 			supported = TRUE;
199 		break;
200 	case ixgbe_media_type_copper:
201 		/* only some copper devices support flow control autoneg */
202 		switch (hw->device_id) {
203 		case IXGBE_DEV_ID_82599_T3_LOM:
204 		case IXGBE_DEV_ID_X540T:
205 		case IXGBE_DEV_ID_X540T1:
206 		case IXGBE_DEV_ID_X540_BYPASS:
207 		case IXGBE_DEV_ID_X550T:
208 		case IXGBE_DEV_ID_X550T1:
209 		case IXGBE_DEV_ID_X550EM_X_10G_T:
210 		case IXGBE_DEV_ID_X550EM_A_10G_T:
211 		case IXGBE_DEV_ID_X550EM_A_1G_T:
212 		case IXGBE_DEV_ID_X550EM_A_1G_T_L:
213 			supported = TRUE;
214 			break;
215 		default:
216 			supported = FALSE;
217 		}
218 	default:
219 		break;
220 	}
221 
222 	if (!supported)
223 		ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
224 			      "Device %x does not support flow control autoneg",
225 			      hw->device_id);
226 
227 	return supported;
228 }
229 
230 /**
231  *  ixgbe_setup_fc_generic - Set up flow control
232  *  @hw: pointer to hardware structure
233  *
234  *  Called at init time to set up flow control.
235  **/
236 s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
237 {
238 	s32 ret_val = IXGBE_SUCCESS;
239 	u32 reg = 0, reg_bp = 0;
240 	u16 reg_cu = 0;
241 	bool locked = FALSE;
242 
243 	DEBUGFUNC("ixgbe_setup_fc_generic");
244 
245 	/* Validate the requested mode */
246 	if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
247 		ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
248 			   "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
249 		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
250 		goto out;
251 	}
252 
253 	/*
254 	 * 10gig parts do not have a word in the EEPROM to determine the
255 	 * default flow control setting, so we explicitly set it to full.
256 	 */
257 	if (hw->fc.requested_mode == ixgbe_fc_default)
258 		hw->fc.requested_mode = ixgbe_fc_full;
259 
260 	/*
261 	 * Set up the 1G and 10G flow control advertisement registers so the
262 	 * HW will be able to do fc autoneg once the cable is plugged in.  If
263 	 * we link at 10G, the 1G advertisement is harmless and vice versa.
264 	 */
265 	switch (hw->phy.media_type) {
266 	case ixgbe_media_type_backplane:
267 		/* some MAC's need RMW protection on AUTOC */
268 		ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &reg_bp);
269 		if (ret_val != IXGBE_SUCCESS)
270 			goto out;
271 
272 		/* fall through - only backplane uses autoc */
273 	case ixgbe_media_type_fiber_fixed:
274 	case ixgbe_media_type_fiber_qsfp:
275 	case ixgbe_media_type_fiber:
276 		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
277 
278 		break;
279 	case ixgbe_media_type_copper:
280 		hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
281 				     IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg_cu);
282 		break;
283 	default:
284 		break;
285 	}
286 
287 	/*
288 	 * The possible values of fc.requested_mode are:
289 	 * 0: Flow control is completely disabled
290 	 * 1: Rx flow control is enabled (we can receive pause frames,
291 	 *    but not send pause frames).
292 	 * 2: Tx flow control is enabled (we can send pause frames but
293 	 *    we do not support receiving pause frames).
294 	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
295 	 * other: Invalid.
296 	 */
297 	switch (hw->fc.requested_mode) {
298 	case ixgbe_fc_none:
299 		/* Flow control completely disabled by software override. */
300 		reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
301 		if (hw->phy.media_type == ixgbe_media_type_backplane)
302 			reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
303 				    IXGBE_AUTOC_ASM_PAUSE);
304 		else if (hw->phy.media_type == ixgbe_media_type_copper)
305 			reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
306 		break;
307 	case ixgbe_fc_tx_pause:
308 		/*
309 		 * Tx Flow control is enabled, and Rx Flow control is
310 		 * disabled by software override.
311 		 */
312 		reg |= IXGBE_PCS1GANA_ASM_PAUSE;
313 		reg &= ~IXGBE_PCS1GANA_SYM_PAUSE;
314 		if (hw->phy.media_type == ixgbe_media_type_backplane) {
315 			reg_bp |= IXGBE_AUTOC_ASM_PAUSE;
316 			reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE;
317 		} else if (hw->phy.media_type == ixgbe_media_type_copper) {
318 			reg_cu |= IXGBE_TAF_ASM_PAUSE;
319 			reg_cu &= ~IXGBE_TAF_SYM_PAUSE;
320 		}
321 		break;
322 	case ixgbe_fc_rx_pause:
323 		/*
324 		 * Rx Flow control is enabled and Tx Flow control is
325 		 * disabled by software override. Since there really
326 		 * isn't a way to advertise that we are capable of RX
327 		 * Pause ONLY, we will advertise that we support both
328 		 * symmetric and asymmetric Rx PAUSE, as such we fall
329 		 * through to the fc_full statement.  Later, we will
330 		 * disable the adapter's ability to send PAUSE frames.
331 		 */
332 	case ixgbe_fc_full:
333 		/* Flow control (both Rx and Tx) is enabled by SW override. */
334 		reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE;
335 		if (hw->phy.media_type == ixgbe_media_type_backplane)
336 			reg_bp |= IXGBE_AUTOC_SYM_PAUSE |
337 				  IXGBE_AUTOC_ASM_PAUSE;
338 		else if (hw->phy.media_type == ixgbe_media_type_copper)
339 			reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
340 		break;
341 	default:
342 		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
343 			     "Flow control param set incorrectly\n");
344 		ret_val = IXGBE_ERR_CONFIG;
345 		goto out;
346 		break;
347 	}
348 
349 	if (hw->mac.type < ixgbe_mac_X540) {
350 		/*
351 		 * Enable auto-negotiation between the MAC & PHY;
352 		 * the MAC will advertise clause 37 flow control.
353 		 */
354 		IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
355 		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
356 
357 		/* Disable AN timeout */
358 		if (hw->fc.strict_ieee)
359 			reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
360 
361 		IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
362 		DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
363 	}
364 
365 	/*
366 	 * AUTOC restart handles negotiation of 1G and 10G on backplane
367 	 * and copper. There is no need to set the PCS1GCTL register.
368 	 *
369 	 */
370 	if (hw->phy.media_type == ixgbe_media_type_backplane) {
371 		reg_bp |= IXGBE_AUTOC_AN_RESTART;
372 		ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked);
373 		if (ret_val)
374 			goto out;
375 	} else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
376 		    (ixgbe_device_supports_autoneg_fc(hw))) {
377 		hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
378 				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu);
379 	}
380 
381 	DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
382 out:
383 	return ret_val;
384 }
385 
/**
 *  ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
 *  @hw: pointer to hardware structure
 *
 *  Starts the hardware by filling the bus info structure and media type, clears
 *  all on chip counters, initializes receive address registers, multicast
 *  table, VLAN filter table, calls routine to set up link and flow control
 *  settings, and leaves transmit and receive units disabled and uninitialized
 *
 *  Returns IXGBE_SUCCESS, or the error from flow-control setup if it fails
 *  with anything other than IXGBE_NOT_IMPLEMENTED.
 **/
s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
{
	s32 ret_val;
	u32 ctrl_ext;
	u16 device_caps;

	DEBUGFUNC("ixgbe_start_hw_generic");

	/* Set the media type */
	hw->phy.media_type = hw->mac.ops.get_media_type(hw);

	/* PHY ops initialization must be done in reset_hw() */

	/* Clear the VLAN filter table */
	hw->mac.ops.clear_vfta(hw);

	/* Clear statistics registers */
	hw->mac.ops.clear_hw_cntrs(hw);

	/* Set No Snoop Disable */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	/* Force the write to post before continuing */
	IXGBE_WRITE_FLUSH(hw);

	/* Setup flow control; NOT_IMPLEMENTED is tolerated (no setup_fc op) */
	ret_val = ixgbe_setup_fc(hw);
	if (ret_val != IXGBE_SUCCESS && ret_val != IXGBE_NOT_IMPLEMENTED) {
		DEBUGOUT1("Flow control setup failed, returning %d\n", ret_val);
		return ret_val;
	}

	/* Cache bit indicating need for crosstalk fix */
	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* device caps word says whether the workaround is needed */
		hw->mac.ops.get_device_caps(hw, &device_caps);
		if (device_caps & IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR)
			hw->need_crosstalk_fix = FALSE;
		else
			hw->need_crosstalk_fix = TRUE;
		break;
	default:
		hw->need_crosstalk_fix = FALSE;
		break;
	}

	/* Clear adapter stopped flag */
	hw->adapter_stopped = FALSE;

	return IXGBE_SUCCESS;
}
448 
449 /**
450  *  ixgbe_start_hw_gen2 - Init sequence for common device family
451  *  @hw: pointer to hw structure
452  *
453  * Performs the init sequence common to the second generation
454  * of 10 GbE devices.
455  * Devices in the second generation:
456  *     82599
457  *     X540
458  **/
459 s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
460 {
461 	u32 i;
462 	u32 regval;
463 
464 	/* Clear the rate limiters */
465 	for (i = 0; i < hw->mac.max_tx_queues; i++) {
466 		IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
467 		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
468 	}
469 	IXGBE_WRITE_FLUSH(hw);
470 
471 	/* Disable relaxed ordering */
472 	for (i = 0; i < hw->mac.max_tx_queues; i++) {
473 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
474 		regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
475 		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
476 	}
477 
478 	for (i = 0; i < hw->mac.max_rx_queues; i++) {
479 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
480 		regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
481 			    IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
482 		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
483 	}
484 
485 	return IXGBE_SUCCESS;
486 }
487 
488 /**
489  *  ixgbe_init_hw_generic - Generic hardware initialization
490  *  @hw: pointer to hardware structure
491  *
492  *  Initialize the hardware by resetting the hardware, filling the bus info
493  *  structure and media type, clears all on chip counters, initializes receive
494  *  address registers, multicast table, VLAN filter table, calls routine to set
495  *  up link and flow control settings, and leaves transmit and receive units
496  *  disabled and uninitialized
497  **/
498 s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
499 {
500 	s32 status;
501 
502 	DEBUGFUNC("ixgbe_init_hw_generic");
503 
504 	/* Reset the hardware */
505 	status = hw->mac.ops.reset_hw(hw);
506 
507 	if (status == IXGBE_SUCCESS || status == IXGBE_ERR_SFP_NOT_PRESENT) {
508 		/* Start the HW */
509 		status = hw->mac.ops.start_hw(hw);
510 	}
511 
512 	/* Initialize the LED link active for LED blink support */
513 	if (hw->mac.ops.init_led_link_act)
514 		hw->mac.ops.init_led_link_act(hw);
515 
516 	if (status != IXGBE_SUCCESS)
517 		DEBUGOUT1("Failed to initialize HW, STATUS = %d\n", status);
518 
519 	return status;
520 }
521 
/**
 *  ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
 *  @hw: pointer to hardware structure
 *
 *  Clears all hardware statistics counters by reading them from the hardware
 *  Statistics counters are clear on read.  Register offsets differ between
 *  the 82598 and the 82599-and-later MACs, hence the mac.type branches.
 **/
s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
{
	u16 i = 0;

	DEBUGFUNC("ixgbe_clear_hw_cntrs_generic");

	/* Error counters */
	IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	IXGBE_READ_REG(hw, IXGBE_ERRBC);
	IXGBE_READ_REG(hw, IXGBE_MSPDC);
	for (i = 0; i < 8; i++)
		IXGBE_READ_REG(hw, IXGBE_MPC(i));

	IXGBE_READ_REG(hw, IXGBE_MLFC);
	IXGBE_READ_REG(hw, IXGBE_MRFC);
	IXGBE_READ_REG(hw, IXGBE_RLEC);
	/* Link-level flow control XON/XOFF counters */
	IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	if (hw->mac.type >= ixgbe_mac_82599EB) {
		IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	} else {
		IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
	}

	/* Per-priority (per-TC) flow control counters */
	for (i = 0; i < 8; i++) {
		IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
		IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
			IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
		} else {
			IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
			IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
		}
	}
	if (hw->mac.type >= ixgbe_mac_82599EB)
		for (i = 0; i < 8; i++)
			IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
	/* Rx packet-size histogram and totals */
	IXGBE_READ_REG(hw, IXGBE_PRC64);
	IXGBE_READ_REG(hw, IXGBE_PRC127);
	IXGBE_READ_REG(hw, IXGBE_PRC255);
	IXGBE_READ_REG(hw, IXGBE_PRC511);
	IXGBE_READ_REG(hw, IXGBE_PRC1023);
	IXGBE_READ_REG(hw, IXGBE_PRC1522);
	IXGBE_READ_REG(hw, IXGBE_GPRC);
	IXGBE_READ_REG(hw, IXGBE_BPRC);
	IXGBE_READ_REG(hw, IXGBE_MPRC);
	IXGBE_READ_REG(hw, IXGBE_GPTC);
	IXGBE_READ_REG(hw, IXGBE_GORCL);
	IXGBE_READ_REG(hw, IXGBE_GORCH);
	IXGBE_READ_REG(hw, IXGBE_GOTCL);
	IXGBE_READ_REG(hw, IXGBE_GOTCH);
	if (hw->mac.type == ixgbe_mac_82598EB)
		for (i = 0; i < 8; i++)
			IXGBE_READ_REG(hw, IXGBE_RNBC(i));
	IXGBE_READ_REG(hw, IXGBE_RUC);
	IXGBE_READ_REG(hw, IXGBE_RFC);
	IXGBE_READ_REG(hw, IXGBE_ROC);
	IXGBE_READ_REG(hw, IXGBE_RJC);
	/* Management-traffic counters */
	IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	IXGBE_READ_REG(hw, IXGBE_TORL);
	IXGBE_READ_REG(hw, IXGBE_TORH);
	IXGBE_READ_REG(hw, IXGBE_TPR);
	IXGBE_READ_REG(hw, IXGBE_TPT);
	/* Tx packet-size histogram */
	IXGBE_READ_REG(hw, IXGBE_PTC64);
	IXGBE_READ_REG(hw, IXGBE_PTC127);
	IXGBE_READ_REG(hw, IXGBE_PTC255);
	IXGBE_READ_REG(hw, IXGBE_PTC511);
	IXGBE_READ_REG(hw, IXGBE_PTC1023);
	IXGBE_READ_REG(hw, IXGBE_PTC1522);
	IXGBE_READ_REG(hw, IXGBE_MPTC);
	IXGBE_READ_REG(hw, IXGBE_BPTC);
	/* Per-queue counters */
	for (i = 0; i < 16; i++) {
		IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
			IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		} else {
			IXGBE_READ_REG(hw, IXGBE_QBRC(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC(i));
		}
	}

	/* X540/X550 keep additional PHY-side counters; read via MDIO.
	 * The reads reuse 'i' as a throwaway destination. */
	if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) {
		if (hw->phy.id == 0)
			ixgbe_identify_phy(hw);
		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
		hw->phy.ops.read_reg(hw, IXGBE_LDPCECL,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
		hw->phy.ops.read_reg(hw, IXGBE_LDPCECH,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
	}

	return IXGBE_SUCCESS;
}
635 
/**
 *  ixgbe_read_pba_string_generic - Reads part number string from EEPROM
 *  @hw: pointer to hardware structure
 *  @pba_num: stores the part number string from the EEPROM
 *  @pba_num_size: part number string buffer length
 *
 *  Reads the part number string from the EEPROM.  Handles both the
 *  string-pointer format (first word is IXGBE_PBANUM_PTR_GUARD, second
 *  word points at a length-prefixed string block) and the legacy format
 *  (two raw hex words decoded into "XXXXXX-0XX").
 *
 *  Returns IXGBE_SUCCESS, IXGBE_ERR_INVALID_ARGUMENT for a NULL buffer,
 *  IXGBE_ERR_NO_SPACE if the buffer is too small, IXGBE_ERR_PBA_SECTION
 *  for a corrupt section, or an EEPROM read error.
 **/
s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
				  u32 pba_num_size)
{
	s32 ret_val;
	u16 data;
	u16 pba_ptr;
	u16 offset;
	u16 length;

	DEBUGFUNC("ixgbe_read_pba_string_generic");

	if (pba_num == NULL) {
		DEBUGOUT("PBA string buffer was null\n");
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		return ret_val;
	}

	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		return ret_val;
	}

	/*
	 * if data is not ptr guard the PBA must be in legacy format which
	 * means pba_ptr is actually our second data word for the PBA number
	 * and we can decode it into an ascii string
	 */
	if (data != IXGBE_PBANUM_PTR_GUARD) {
		DEBUGOUT("NVM PBA number is not stored as string\n");

		/* we will need 11 characters to store the PBA */
		if (pba_num_size < 11) {
			DEBUGOUT("PBA string buffer too small\n");
			return IXGBE_ERR_NO_SPACE;
		}

		/* extract hex string from data and pba_ptr */
		pba_num[0] = (data >> 12) & 0xF;
		pba_num[1] = (data >> 8) & 0xF;
		pba_num[2] = (data >> 4) & 0xF;
		pba_num[3] = data & 0xF;
		pba_num[4] = (pba_ptr >> 12) & 0xF;
		pba_num[5] = (pba_ptr >> 8) & 0xF;
		pba_num[6] = '-';
		/* NOTE(review): position 7 is a literal 0, converted to '0'
		 * by the loop below, yielding "XXXXXX-0XX"; this matches the
		 * legacy PBA layout in Intel shared code — confirm before
		 * changing. */
		pba_num[7] = 0;
		pba_num[8] = (pba_ptr >> 4) & 0xF;
		pba_num[9] = pba_ptr & 0xF;

		/* put a null character on the end of our string */
		pba_num[10] = '\0';

		/* switch all the data but the '-' to hex char */
		for (offset = 0; offset < 10; offset++) {
			if (pba_num[offset] < 0xA)
				pba_num[offset] += '0';
			else if (pba_num[offset] < 0x10)
				pba_num[offset] += 'A' - 0xA;
		}

		return IXGBE_SUCCESS;
	}

	/* string format: first word of the block is its length in words */
	ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		return ret_val;
	}

	if (length == 0xFFFF || length == 0) {
		DEBUGOUT("NVM PBA number section invalid length\n");
		return IXGBE_ERR_PBA_SECTION;
	}

	/* check if pba_num buffer is big enough */
	if (pba_num_size  < (((u32)length * 2) - 1)) {
		DEBUGOUT("PBA string buffer too small\n");
		return IXGBE_ERR_NO_SPACE;
	}

	/* trim pba length from start of string */
	pba_ptr++;
	length--;

	/* each EEPROM word holds two ASCII characters, high byte first */
	for (offset = 0; offset < length; offset++) {
		ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
		if (ret_val) {
			DEBUGOUT("NVM Read Error\n");
			return ret_val;
		}
		pba_num[offset * 2] = (u8)(data >> 8);
		pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
	}
	pba_num[offset * 2] = '\0';

	return IXGBE_SUCCESS;
}
746 
747 /**
748  *  ixgbe_read_pba_num_generic - Reads part number from EEPROM
749  *  @hw: pointer to hardware structure
750  *  @pba_num: stores the part number from the EEPROM
751  *
752  *  Reads the part number from the EEPROM.
753  **/
754 s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num)
755 {
756 	s32 ret_val;
757 	u16 data;
758 
759 	DEBUGFUNC("ixgbe_read_pba_num_generic");
760 
761 	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
762 	if (ret_val) {
763 		DEBUGOUT("NVM Read Error\n");
764 		return ret_val;
765 	} else if (data == IXGBE_PBANUM_PTR_GUARD) {
766 		DEBUGOUT("NVM Not supported\n");
767 		return IXGBE_NOT_IMPLEMENTED;
768 	}
769 	*pba_num = (u32)(data << 16);
770 
771 	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data);
772 	if (ret_val) {
773 		DEBUGOUT("NVM Read Error\n");
774 		return ret_val;
775 	}
776 	*pba_num |= data;
777 
778 	return IXGBE_SUCCESS;
779 }
780 
/**
 *  ixgbe_read_pba_raw
 *  @hw: pointer to the HW structure
 *  @eeprom_buf: optional pointer to EEPROM image
 *  @eeprom_buf_size: size of EEPROM image in words
 *  @max_pba_block_size: PBA block size limit
 *  @pba: pointer to output PBA structure
 *
 *  Reads PBA from EEPROM image when eeprom_buf is not NULL.
 *  Reads PBA from physical EEPROM device when eeprom_buf is NULL.
 *
 *  Fills pba->word[0..1]; when the PBA is in string-pointer format
 *  (word 0 == IXGBE_PBANUM_PTR_GUARD), also copies the PBA block into
 *  pba->pba_block, which the caller must have pre-allocated with at
 *  least max_pba_block_size words.
 *
 *  Returns IXGBE_SUCCESS, IXGBE_ERR_PARAM for NULL/too-small arguments,
 *  or an EEPROM read error.
 **/
s32 ixgbe_read_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
		       u32 eeprom_buf_size, u16 max_pba_block_size,
		       struct ixgbe_pba *pba)
{
	s32 ret_val;
	u16 pba_block_size;

	if (pba == NULL)
		return IXGBE_ERR_PARAM;

	/* Fetch the two PBA words, from NVM or from the supplied image */
	if (eeprom_buf == NULL) {
		ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
						     &pba->word[0]);
		if (ret_val)
			return ret_val;
	} else {
		if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
			pba->word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
			pba->word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
		} else {
			return IXGBE_ERR_PARAM;
		}
	}

	/* String-pointer format: word[1] points at a PBA block to copy */
	if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
		if (pba->pba_block == NULL)
			return IXGBE_ERR_PARAM;

		ret_val = ixgbe_get_pba_block_size(hw, eeprom_buf,
						   eeprom_buf_size,
						   &pba_block_size);
		if (ret_val)
			return ret_val;

		/* Caller's buffer must fit the whole block */
		if (pba_block_size > max_pba_block_size)
			return IXGBE_ERR_PARAM;

		if (eeprom_buf == NULL) {
			ret_val = hw->eeprom.ops.read_buffer(hw, pba->word[1],
							     pba_block_size,
							     pba->pba_block);
			if (ret_val)
				return ret_val;
		} else {
			/* Bounds-check the block against the image size */
			if (eeprom_buf_size > (u32)(pba->word[1] +
					      pba_block_size)) {
				memcpy(pba->pba_block,
				       &eeprom_buf[pba->word[1]],
				       pba_block_size * sizeof(u16));
			} else {
				return IXGBE_ERR_PARAM;
			}
		}
	}

	return IXGBE_SUCCESS;
}
850 
/**
 *  ixgbe_write_pba_raw
 *  @hw: pointer to the HW structure
 *  @eeprom_buf: optional pointer to EEPROM image
 *  @eeprom_buf_size: size of EEPROM image in words
 *  @pba: pointer to PBA structure
 *
 *  Writes PBA to EEPROM image when eeprom_buf is not NULL.
 *  Writes PBA to physical EEPROM device when eeprom_buf is NULL.
 *
 *  Counterpart of ixgbe_read_pba_raw.  When the PBA is in string-pointer
 *  format, pba->pba_block[0] is the block length in words (including the
 *  length word itself) and the whole block is written at pba->word[1].
 *
 *  Returns IXGBE_SUCCESS, IXGBE_ERR_PARAM for NULL/too-small arguments,
 *  or an EEPROM write error.
 **/
s32 ixgbe_write_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
			u32 eeprom_buf_size, struct ixgbe_pba *pba)
{
	s32 ret_val;

	if (pba == NULL)
		return IXGBE_ERR_PARAM;

	/* Write the two PBA words, to NVM or into the supplied image */
	if (eeprom_buf == NULL) {
		ret_val = hw->eeprom.ops.write_buffer(hw, IXGBE_PBANUM0_PTR, 2,
						      &pba->word[0]);
		if (ret_val)
			return ret_val;
	} else {
		if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
			eeprom_buf[IXGBE_PBANUM0_PTR] = pba->word[0];
			eeprom_buf[IXGBE_PBANUM1_PTR] = pba->word[1];
		} else {
			return IXGBE_ERR_PARAM;
		}
	}

	/* String-pointer format: also write the PBA block itself */
	if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
		if (pba->pba_block == NULL)
			return IXGBE_ERR_PARAM;

		if (eeprom_buf == NULL) {
			/* pba_block[0] holds the block length in words */
			ret_val = hw->eeprom.ops.write_buffer(hw, pba->word[1],
							      pba->pba_block[0],
							      pba->pba_block);
			if (ret_val)
				return ret_val;
		} else {
			/* Bounds-check the block against the image size */
			if (eeprom_buf_size > (u32)(pba->word[1] +
					      pba->pba_block[0])) {
				memcpy(&eeprom_buf[pba->word[1]],
				       pba->pba_block,
				       pba->pba_block[0] * sizeof(u16));
			} else {
				return IXGBE_ERR_PARAM;
			}
		}
	}

	return IXGBE_SUCCESS;
}
908 
/**
 *  ixgbe_get_pba_block_size
 *  @hw: pointer to the HW structure
 *  @eeprom_buf: optional pointer to EEPROM image
 *  @eeprom_buf_size: size of EEPROM image in words
 *  @pba_block_size: pointer to output variable (may be NULL)
 *
 *  Returns the size of the PBA block in words. Function operates on EEPROM
 *  image if the eeprom_buf pointer is not NULL otherwise it accesses physical
 *  EEPROM device.
 *
 *  The size is the length word stored at the start of the PBA block (for
 *  string-pointer format PBAs), or 0 for the legacy two-word format.
 *  Returns IXGBE_SUCCESS, IXGBE_ERR_PARAM, IXGBE_ERR_PBA_SECTION for an
 *  invalid length, or an EEPROM read error.
 **/
s32 ixgbe_get_pba_block_size(struct ixgbe_hw *hw, u16 *eeprom_buf,
			     u32 eeprom_buf_size, u16 *pba_block_size)
{
	s32 ret_val;
	u16 pba_word[2];
	u16 length;

	DEBUGFUNC("ixgbe_get_pba_block_size");

	/* Fetch the two PBA words, from NVM or from the supplied image */
	if (eeprom_buf == NULL) {
		ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
						     &pba_word[0]);
		if (ret_val)
			return ret_val;
	} else {
		if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
			pba_word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
			pba_word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
		} else {
			return IXGBE_ERR_PARAM;
		}
	}

	if (pba_word[0] == IXGBE_PBANUM_PTR_GUARD) {
		/* First word of the block (at pba_word[1]) is its length */
		if (eeprom_buf == NULL) {
			ret_val = hw->eeprom.ops.read(hw, pba_word[1] + 0,
						      &length);
			if (ret_val)
				return ret_val;
		} else {
			if (eeprom_buf_size > pba_word[1])
				length = eeprom_buf[pba_word[1] + 0];
			else
				return IXGBE_ERR_PARAM;
		}

		/* 0xFFFF is erased flash; 0 is nonsensical */
		if (length == 0xFFFF || length == 0)
			return IXGBE_ERR_PBA_SECTION;
	} else {
		/* PBA number in legacy format, there is no PBA Block. */
		length = 0;
	}

	if (pba_block_size != NULL)
		*pba_block_size = length;

	return IXGBE_SUCCESS;
}
969 
970 /**
971  *  ixgbe_get_mac_addr_generic - Generic get MAC address
972  *  @hw: pointer to hardware structure
973  *  @mac_addr: Adapter MAC address
974  *
975  *  Reads the adapter's MAC address from first Receive Address Register (RAR0)
976  *  A reset of the adapter must be performed prior to calling this function
977  *  in order for the MAC address to have been loaded from the EEPROM into RAR0
978  **/
979 s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
980 {
981 	u32 rar_high;
982 	u32 rar_low;
983 	u16 i;
984 
985 	DEBUGFUNC("ixgbe_get_mac_addr_generic");
986 
987 	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
988 	rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));
989 
990 	for (i = 0; i < 4; i++)
991 		mac_addr[i] = (u8)(rar_low >> (i*8));
992 
993 	for (i = 0; i < 2; i++)
994 		mac_addr[i+4] = (u8)(rar_high >> (i*8));
995 
996 	return IXGBE_SUCCESS;
997 }
998 
999 /**
1000  *  ixgbe_set_pci_config_data_generic - Generic store PCI bus info
1001  *  @hw: pointer to hardware structure
1002  *  @link_status: the link status returned by the PCI config space
1003  *
1004  *  Stores the PCI bus info (speed, width, type) within the ixgbe_hw structure
1005  **/
void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw, u16 link_status)
{
	struct ixgbe_mac_info *mac = &hw->mac;

	/* This helper decodes a PCIe link status word, so default an
	 * unidentified bus type to PCI Express. */
	if (hw->bus.type == ixgbe_bus_type_unknown)
		hw->bus.type = ixgbe_bus_type_pci_express;

	/* Decode the negotiated link width field */
	switch (link_status & IXGBE_PCI_LINK_WIDTH) {
	case IXGBE_PCI_LINK_WIDTH_1:
		hw->bus.width = ixgbe_bus_width_pcie_x1;
		break;
	case IXGBE_PCI_LINK_WIDTH_2:
		hw->bus.width = ixgbe_bus_width_pcie_x2;
		break;
	case IXGBE_PCI_LINK_WIDTH_4:
		hw->bus.width = ixgbe_bus_width_pcie_x4;
		break;
	case IXGBE_PCI_LINK_WIDTH_8:
		hw->bus.width = ixgbe_bus_width_pcie_x8;
		break;
	default:
		hw->bus.width = ixgbe_bus_width_unknown;
		break;
	}

	/* Decode the negotiated link speed field */
	switch (link_status & IXGBE_PCI_LINK_SPEED) {
	case IXGBE_PCI_LINK_SPEED_2500:
		hw->bus.speed = ixgbe_bus_speed_2500;
		break;
	case IXGBE_PCI_LINK_SPEED_5000:
		hw->bus.speed = ixgbe_bus_speed_5000;
		break;
	case IXGBE_PCI_LINK_SPEED_8000:
		hw->bus.speed = ixgbe_bus_speed_8000;
		break;
	default:
		hw->bus.speed = ixgbe_bus_speed_unknown;
		break;
	}

	/* Determine the LAN (port) id now that bus info is stored */
	mac->ops.set_lan_id(hw);
}
1048 
1049 /**
1050  *  ixgbe_get_bus_info_generic - Generic set PCI bus info
1051  *  @hw: pointer to hardware structure
1052  *
1053  *  Gets the PCI bus info (speed, width, type) then calls helper function to
1054  *  store this data within the ixgbe_hw structure.
1055  **/
1056 s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
1057 {
1058 	u16 link_status;
1059 
1060 	DEBUGFUNC("ixgbe_get_bus_info_generic");
1061 
1062 	/* Get the negotiated link width and speed from PCI config space */
1063 	link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);
1064 
1065 	ixgbe_set_pci_config_data_generic(hw, link_status);
1066 
1067 	return IXGBE_SUCCESS;
1068 }
1069 
1070 /**
1071  *  ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
1072  *  @hw: pointer to the HW structure
1073  *
1074  *  Determines the LAN function id by reading memory-mapped registers and swaps
1075  *  the port value if requested, and set MAC instance for devices that share
1076  *  CS4227.
1077  **/
void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
{
	struct ixgbe_bus_info *bus = &hw->bus;
	u32 reg;
	u16 ee_ctrl_4;

	DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie");

	/* The LAN function id is reported in the STATUS register */
	reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
	bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
	bus->lan_id = (u8)bus->func;

	/* check for a port swap */
	reg = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw));
	if (reg & IXGBE_FACTPS_LFS)
		bus->func ^= 0x1;

	/* Get MAC instance from EEPROM for configuring CS4227 */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP) {
		/* NOTE(review): the read status is ignored here; if the read
		 * fails, ee_ctrl_4 is used uninitialized — confirm that all
		 * ops.read implementations set *data on failure, or check
		 * the return value. */
		hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_4, &ee_ctrl_4);
		bus->instance_id = (ee_ctrl_4 & IXGBE_EE_CTRL_4_INST_ID) >>
				   IXGBE_EE_CTRL_4_INST_ID_SHIFT;
	}
}
1102 
1103 /**
1104  *  ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
1105  *  @hw: pointer to hardware structure
1106  *
1107  *  Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
1108  *  disables transmit and receive units. The adapter_stopped flag is used by
1109  *  the shared code and drivers to determine if the adapter is in a stopped
1110  *  state and should not touch the hardware.
1111  **/
s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
{
	u32 reg_val;
	u16 i;

	DEBUGFUNC("ixgbe_stop_adapter_generic");

	/*
	 * Set the adapter_stopped flag so other driver functions stop touching
	 * the hardware
	 */
	hw->adapter_stopped = TRUE;

	/* Disable the receive unit */
	ixgbe_disable_rx(hw);

	/* Clear interrupt mask to stop interrupts from being generated */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);

	/* Clear any pending interrupts, flush previous writes */
	IXGBE_READ_REG(hw, IXGBE_EICR);

	/* Disable the transmit unit.  Each queue must be disabled. */
	for (i = 0; i < hw->mac.max_tx_queues; i++)
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH);

	/* Disable the receive unit by stopping each queue */
	for (i = 0; i < hw->mac.max_rx_queues; i++) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
		reg_val &= ~IXGBE_RXDCTL_ENABLE;
		reg_val |= IXGBE_RXDCTL_SWFLSH;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
	}

	/* flush all queues disables */
	IXGBE_WRITE_FLUSH(hw);
	/* give the queue-disable writes time to take effect */
	msec_delay(2);

	/*
	 * Prevent the PCI-E bus from hanging by disabling PCI-E master
	 * access and verify no pending requests
	 */
	return ixgbe_disable_pcie_master(hw);
}
1156 
1157 /**
1158  *  ixgbe_init_led_link_act_generic - Store the LED index link/activity.
1159  *  @hw: pointer to hardware structure
1160  *
1161  *  Store the index for the link active LED. This will be used to support
1162  *  blinking the LED.
1163  **/
s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	u32 led_reg, led_mode;
	u8 i;

	led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);

	/* Get LED link active from the LEDCTL register */
	for (i = 0; i < 4; i++) {
		led_mode = led_reg >> IXGBE_LED_MODE_SHIFT(i);

		/* Use the first LED whose mode field is "link active" */
		if ((led_mode & IXGBE_LED_MODE_MASK_BASE) ==
		     IXGBE_LED_LINK_ACTIVE) {
			mac->led_link_act = i;
			return IXGBE_SUCCESS;
		}
	}

	/*
	 * If LEDCTL register does not have the LED link active set, then use
	 * known MAC defaults.
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_X550EM_a:
	case ixgbe_mac_X550EM_x:
		mac->led_link_act = 1;
		break;
	default:
		mac->led_link_act = 2;
	}
	return IXGBE_SUCCESS;
}
1197 
1198 /**
1199  *  ixgbe_led_on_generic - Turns on the software controllable LEDs.
1200  *  @hw: pointer to hardware structure
1201  *  @index: led number to turn on
1202  **/
1203 s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
1204 {
1205 	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1206 
1207 	DEBUGFUNC("ixgbe_led_on_generic");
1208 
1209 	if (index > 3)
1210 		return IXGBE_ERR_PARAM;
1211 
1212 	/* To turn on the LED, set mode to ON. */
1213 	led_reg &= ~IXGBE_LED_MODE_MASK(index);
1214 	led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
1215 	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
1216 	IXGBE_WRITE_FLUSH(hw);
1217 
1218 	return IXGBE_SUCCESS;
1219 }
1220 
1221 /**
1222  *  ixgbe_led_off_generic - Turns off the software controllable LEDs.
1223  *  @hw: pointer to hardware structure
1224  *  @index: led number to turn off
1225  **/
1226 s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
1227 {
1228 	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1229 
1230 	DEBUGFUNC("ixgbe_led_off_generic");
1231 
1232 	if (index > 3)
1233 		return IXGBE_ERR_PARAM;
1234 
1235 	/* To turn off the LED, set mode to OFF. */
1236 	led_reg &= ~IXGBE_LED_MODE_MASK(index);
1237 	led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
1238 	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
1239 	IXGBE_WRITE_FLUSH(hw);
1240 
1241 	return IXGBE_SUCCESS;
1242 }
1243 
1244 /**
1245  *  ixgbe_init_eeprom_params_generic - Initialize EEPROM params
1246  *  @hw: pointer to hardware structure
1247  *
1248  *  Initializes the EEPROM parameters ixgbe_eeprom_info within the
1249  *  ixgbe_hw struct in order to set up EEPROM access.
1250  **/
s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
{
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	u32 eec;
	u16 eeprom_size;

	DEBUGFUNC("ixgbe_init_eeprom_params_generic");

	/* Only run once; subsequent calls are no-ops */
	if (eeprom->type == ixgbe_eeprom_uninitialized) {
		eeprom->type = ixgbe_eeprom_none;
		/* Set default semaphore delay to 10ms which is a well
		 * tested value */
		eeprom->semaphore_delay = 10;
		/* Clear EEPROM page size, it will be initialized as needed */
		eeprom->word_page_size = 0;

		/*
		 * Check for EEPROM present first.
		 * If not present leave as none
		 */
		eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
		if (eec & IXGBE_EEC_PRES) {
			eeprom->type = ixgbe_eeprom_spi;

			/*
			 * SPI EEPROM is assumed here.  This code would need to
			 * change if a future EEPROM is not SPI.
			 */
			eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
					    IXGBE_EEC_SIZE_SHIFT);
			/* Size field encodes log2 of the word count */
			eeprom->word_size = 1 << (eeprom_size +
					     IXGBE_EEPROM_WORD_SIZE_SHIFT);
		}

		/* EEC also reports the address width of the device */
		if (eec & IXGBE_EEC_ADDR_SIZE)
			eeprom->address_bits = 16;
		else
			eeprom->address_bits = 8;
		DEBUGOUT3("Eeprom params: type = %d, size = %d, address bits: "
			  "%d\n", eeprom->type, eeprom->word_size,
			  eeprom->address_bits);
	}

	return IXGBE_SUCCESS;
}
1296 
1297 /**
1298  *  ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang
1299  *  @hw: pointer to hardware structure
1300  *  @offset: offset within the EEPROM to write
1301  *  @words: number of word(s)
1302  *  @data: 16 bit word(s) to write to EEPROM
1303  *
1304  *  Writes 16 bit word(s) to EEPROM through bit-bang method
1305  **/
s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
					       u16 words, u16 *data)
{
	s32 status = IXGBE_SUCCESS;
	u16 i, count;

	DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang_generic");

	hw->eeprom.ops.init_params(hw);

	/* An empty write request is a caller error */
	if (words == 0) {
		status = IXGBE_ERR_INVALID_ARGUMENT;
		goto out;
	}

	/* The whole range must fit inside the EEPROM */
	if (offset + words > hw->eeprom.word_size) {
		status = IXGBE_ERR_EEPROM;
		goto out;
	}

	/*
	 * The EEPROM page size cannot be queried from the chip. We do lazy
	 * initialization. It is worth to do that when we write large buffer.
	 */
	if ((hw->eeprom.word_page_size == 0) &&
	    (words > IXGBE_EEPROM_PAGE_SIZE_MAX))
		ixgbe_detect_eeprom_page_size_generic(hw, offset);

	/*
	 * We cannot hold synchronization semaphores for too long
	 * to avoid other entity starvation. However it is more efficient
	 * to read in bursts than synchronizing access for each word.
	 */
	/* NOTE(review): the chunk size below reuses the RD (read) buffer
	 * maximum on this write path — presumably the limits match; confirm
	 * against the register definitions. */
	for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
		count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
			IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
		status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i,
							    count, &data[i]);

		if (status != IXGBE_SUCCESS)
			break;
	}

out:
	return status;
}
1352 
1353 /**
1354  *  ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM
1355  *  @hw: pointer to hardware structure
1356  *  @offset: offset within the EEPROM to be written to
1357  *  @words: number of word(s)
1358  *  @data: 16 bit word(s) to be written to the EEPROM
1359  *
1360  *  If ixgbe_eeprom_update_checksum is not called after this function, the
1361  *  EEPROM will most likely contain an invalid checksum.
1362  **/
static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
					      u16 words, u16 *data)
{
	s32 status;
	u16 word;
	u16 page_size;
	u16 i;
	u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;

	DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang");

	/* Prepare the EEPROM for writing  */
	status = ixgbe_acquire_eeprom(hw);

	if (status == IXGBE_SUCCESS) {
		if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
			ixgbe_release_eeprom(hw);
			status = IXGBE_ERR_EEPROM;
		}
	}

	if (status == IXGBE_SUCCESS) {
		for (i = 0; i < words; i++) {
			ixgbe_standby_eeprom(hw);

			/*  Send the WRITE ENABLE command (8 bit opcode )  */
			ixgbe_shift_out_eeprom_bits(hw,
						   IXGBE_EEPROM_WREN_OPCODE_SPI,
						   IXGBE_EEPROM_OPCODE_BITS);

			ixgbe_standby_eeprom(hw);

			/*
			 * Some SPI eeproms use the 8th address bit embedded
			 * in the opcode
			 */
			if ((hw->eeprom.address_bits == 8) &&
			    ((offset + i) >= 128))
				write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;

			/* Send the Write command (8-bit opcode + addr) */
			ixgbe_shift_out_eeprom_bits(hw, write_opcode,
						    IXGBE_EEPROM_OPCODE_BITS);
			/* Byte address = word offset * 2 */
			ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
						    hw->eeprom.address_bits);

			page_size = hw->eeprom.word_page_size;

			/* Send the data in burst via SPI*/
			do {
				word = data[i];
				/* Device expects big-endian words; swap */
				word = (word >> 8) | (word << 8);
				ixgbe_shift_out_eeprom_bits(hw, word, 16);

				/* Page size 0 = unknown: write one word at
				 * a time */
				if (page_size == 0)
					break;

				/* do not wrap around page */
				if (((offset + i) & (page_size - 1)) ==
				    (page_size - 1))
					break;
			} while (++i < words);

			ixgbe_standby_eeprom(hw);
			/* Allow the device's internal write cycle to finish */
			msec_delay(10);
		}
		/* Done with writing - release the EEPROM */
		ixgbe_release_eeprom(hw);
	}

	return status;
}
1435 
1436 /**
1437  *  ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
1438  *  @hw: pointer to hardware structure
1439  *  @offset: offset within the EEPROM to be written to
1440  *  @data: 16 bit word to be written to the EEPROM
1441  *
1442  *  If ixgbe_eeprom_update_checksum is not called after this function, the
1443  *  EEPROM will most likely contain an invalid checksum.
1444  **/
1445 s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
1446 {
1447 	s32 status;
1448 
1449 	DEBUGFUNC("ixgbe_write_eeprom_generic");
1450 
1451 	hw->eeprom.ops.init_params(hw);
1452 
1453 	if (offset >= hw->eeprom.word_size) {
1454 		status = IXGBE_ERR_EEPROM;
1455 		goto out;
1456 	}
1457 
1458 	status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
1459 
1460 out:
1461 	return status;
1462 }
1463 
1464 /**
1465  *  ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang
1466  *  @hw: pointer to hardware structure
1467  *  @offset: offset within the EEPROM to be read
1468  *  @data: read 16 bit words(s) from EEPROM
1469  *  @words: number of word(s)
1470  *
1471  *  Reads 16 bit word(s) from EEPROM through bit-bang method
1472  **/
s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
					      u16 words, u16 *data)
{
	s32 status = IXGBE_SUCCESS;
	u16 i, count;

	DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang_generic");

	hw->eeprom.ops.init_params(hw);

	/* An empty read request is a caller error */
	if (words == 0) {
		status = IXGBE_ERR_INVALID_ARGUMENT;
		goto out;
	}

	/* The whole range must fit inside the EEPROM */
	if (offset + words > hw->eeprom.word_size) {
		status = IXGBE_ERR_EEPROM;
		goto out;
	}

	/*
	 * We cannot hold synchronization semaphores for too long
	 * to avoid other entity starvation. However it is more efficient
	 * to read in bursts than synchronizing access for each word.
	 */
	for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
		/* Chunk = min(remaining words, max burst size) */
		count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
			IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);

		status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i,
							   count, &data[i]);

		if (status != IXGBE_SUCCESS)
			break;
	}

out:
	return status;
}
1512 
1513 /**
1514  *  ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang
1515  *  @hw: pointer to hardware structure
1516  *  @offset: offset within the EEPROM to be read
1517  *  @words: number of word(s)
1518  *  @data: read 16 bit word(s) from EEPROM
1519  *
1520  *  Reads 16 bit word(s) from EEPROM through bit-bang method
1521  **/
static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
					     u16 words, u16 *data)
{
	s32 status;
	u16 word_in;
	u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
	u16 i;

	DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang");

	/* Prepare the EEPROM for reading  */
	status = ixgbe_acquire_eeprom(hw);

	if (status == IXGBE_SUCCESS) {
		if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
			ixgbe_release_eeprom(hw);
			status = IXGBE_ERR_EEPROM;
		}
	}

	if (status == IXGBE_SUCCESS) {
		for (i = 0; i < words; i++) {
			ixgbe_standby_eeprom(hw);
			/*
			 * Some SPI eeproms use the 8th address bit embedded
			 * in the opcode
			 */
			if ((hw->eeprom.address_bits == 8) &&
			    ((offset + i) >= 128))
				read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;

			/* Send the READ command (opcode + addr) */
			ixgbe_shift_out_eeprom_bits(hw, read_opcode,
						    IXGBE_EEPROM_OPCODE_BITS);
			/* Byte address = word offset * 2 */
			ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
						    hw->eeprom.address_bits);

			/* Read the data; device returns big-endian, swap. */
			word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
			data[i] = (word_in >> 8) | (word_in << 8);
		}

		/* End this read operation */
		ixgbe_release_eeprom(hw);
	}

	return status;
}
1570 
1571 /**
1572  *  ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
1573  *  @hw: pointer to hardware structure
1574  *  @offset: offset within the EEPROM to be read
1575  *  @data: read 16 bit value from EEPROM
1576  *
1577  *  Reads 16 bit value from EEPROM through bit-bang method
1578  **/
1579 s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1580 				       u16 *data)
1581 {
1582 	s32 status;
1583 
1584 	DEBUGFUNC("ixgbe_read_eeprom_bit_bang_generic");
1585 
1586 	hw->eeprom.ops.init_params(hw);
1587 
1588 	if (offset >= hw->eeprom.word_size) {
1589 		status = IXGBE_ERR_EEPROM;
1590 		goto out;
1591 	}
1592 
1593 	status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1594 
1595 out:
1596 	return status;
1597 }
1598 
1599 /**
1600  *  ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD
1601  *  @hw: pointer to hardware structure
1602  *  @offset: offset of word in the EEPROM to read
1603  *  @words: number of word(s)
1604  *  @data: 16 bit word(s) from the EEPROM
1605  *
1606  *  Reads a 16 bit word(s) from the EEPROM using the EERD register.
1607  **/
s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
				   u16 words, u16 *data)
{
	u32 eerd;
	s32 status = IXGBE_SUCCESS;
	u32 i;

	DEBUGFUNC("ixgbe_read_eerd_buffer_generic");

	hw->eeprom.ops.init_params(hw);

	/* An empty read request is a caller error */
	if (words == 0) {
		status = IXGBE_ERR_INVALID_ARGUMENT;
		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
		goto out;
	}

	if (offset >= hw->eeprom.word_size) {
		status = IXGBE_ERR_EEPROM;
		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
		goto out;
	}

	/* Issue one EERD read per word, polling for completion each time */
	for (i = 0; i < words; i++) {
		/* Pack the word address and the START bit into EERD */
		eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
		       IXGBE_EEPROM_RW_REG_START;

		IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);

		if (status == IXGBE_SUCCESS) {
			/* Data word lives in the upper bits of EERD */
			data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
				   IXGBE_EEPROM_RW_REG_DATA);
		} else {
			DEBUGOUT("Eeprom read timed out\n");
			goto out;
		}
	}
out:
	return status;
}
1649 
1650 /**
1651  *  ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size
1652  *  @hw: pointer to hardware structure
1653  *  @offset: offset within the EEPROM to be used as a scratch pad
1654  *
1655  *  Discover EEPROM page size by writing marching data at given offset.
1656  *  This function is called only when we are writing a new large buffer
1657  *  at given offset so the data would be overwritten anyway.
1658  **/
static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
						 u16 offset)
{
	u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
	s32 status = IXGBE_SUCCESS;
	u16 i;

	DEBUGFUNC("ixgbe_detect_eeprom_page_size_generic");

	/* Marching pattern: data[i] == i */
	for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
		data[i] = i;

	/* Temporarily claim the max page size so the write helper streams
	 * the whole buffer in one burst, then restore "unknown". */
	hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX;
	status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset,
					     IXGBE_EEPROM_PAGE_SIZE_MAX, data);
	hw->eeprom.word_page_size = 0;
	if (status != IXGBE_SUCCESS)
		goto out;

	/* Read back the first word: the wrap-around overwrote it with the
	 * index of the first word past the page boundary. */
	status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
	if (status != IXGBE_SUCCESS)
		goto out;

	/*
	 * When writing in burst more than the actual page size
	 * EEPROM address wraps around current page.
	 */
	hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];

	DEBUGOUT1("Detected EEPROM page size = %d words.",
		  hw->eeprom.word_page_size);
out:
	return status;
}
1693 
1694 /**
1695  *  ixgbe_read_eerd_generic - Read EEPROM word using EERD
1696  *  @hw: pointer to hardware structure
1697  *  @offset: offset of  word in the EEPROM to read
1698  *  @data: word read from the EEPROM
1699  *
1700  *  Reads a 16 bit word from the EEPROM using the EERD register.
1701  **/
s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
{
	/* A single-word EERD read is a one-element buffer read */
	return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
}
1706 
1707 /**
1708  *  ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR
1709  *  @hw: pointer to hardware structure
1710  *  @offset: offset of  word in the EEPROM to write
1711  *  @words: number of word(s)
1712  *  @data: word(s) write to the EEPROM
1713  *
1714  *  Write a 16 bit word(s) to the EEPROM using the EEWR register.
1715  **/
1716 s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1717 				    u16 words, u16 *data)
1718 {
1719 	u32 eewr;
1720 	s32 status = IXGBE_SUCCESS;
1721 	u16 i;
1722 
1723 	DEBUGFUNC("ixgbe_write_eewr_generic");
1724 
1725 	hw->eeprom.ops.init_params(hw);
1726 
1727 	if (words == 0) {
1728 		status = IXGBE_ERR_INVALID_ARGUMENT;
1729 		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
1730 		goto out;
1731 	}
1732 
1733 	if (offset >= hw->eeprom.word_size) {
1734 		status = IXGBE_ERR_EEPROM;
1735 		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
1736 		goto out;
1737 	}
1738 
1739 	for (i = 0; i < words; i++) {
1740 		eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1741 			(data[i] << IXGBE_EEPROM_RW_REG_DATA) |
1742 			IXGBE_EEPROM_RW_REG_START;
1743 
1744 		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1745 		if (status != IXGBE_SUCCESS) {
1746 			DEBUGOUT("Eeprom write EEWR timed out\n");
1747 			goto out;
1748 		}
1749 
1750 		IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
1751 
1752 		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1753 		if (status != IXGBE_SUCCESS) {
1754 			DEBUGOUT("Eeprom write EEWR timed out\n");
1755 			goto out;
1756 		}
1757 	}
1758 
1759 out:
1760 	return status;
1761 }
1762 
1763 /**
1764  *  ixgbe_write_eewr_generic - Write EEPROM word using EEWR
1765  *  @hw: pointer to hardware structure
1766  *  @offset: offset of  word in the EEPROM to write
1767  *  @data: word write to the EEPROM
1768  *
1769  *  Write a 16 bit word to the EEPROM using the EEWR register.
1770  **/
s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
{
	/* A single-word EEWR write is a one-element buffer write */
	return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data);
}
1775 
1776 /**
1777  *  ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
1778  *  @hw: pointer to hardware structure
1779  *  @ee_reg: EEPROM flag for polling
1780  *
1781  *  Polls the status bit (bit 1) of the EERD or EEWR to determine when the
1782  *  read or write is done respectively.
1783  **/
1784 s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
1785 {
1786 	u32 i;
1787 	u32 reg;
1788 	s32 status = IXGBE_ERR_EEPROM;
1789 
1790 	DEBUGFUNC("ixgbe_poll_eerd_eewr_done");
1791 
1792 	for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
1793 		if (ee_reg == IXGBE_NVM_POLL_READ)
1794 			reg = IXGBE_READ_REG(hw, IXGBE_EERD);
1795 		else
1796 			reg = IXGBE_READ_REG(hw, IXGBE_EEWR);
1797 
1798 		if (reg & IXGBE_EEPROM_RW_REG_DONE) {
1799 			status = IXGBE_SUCCESS;
1800 			break;
1801 		}
1802 		usec_delay(5);
1803 	}
1804 
1805 	if (i == IXGBE_EERD_EEWR_ATTEMPTS)
1806 		ERROR_REPORT1(IXGBE_ERROR_POLLING,
1807 			     "EEPROM read/write done polling timed out");
1808 
1809 	return status;
1810 }
1811 
1812 /**
1813  *  ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
1814  *  @hw: pointer to hardware structure
1815  *
1816  *  Prepares EEPROM for access using bit-bang method. This function should
1817  *  be called before issuing a command to the EEPROM.
1818  **/
static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_SUCCESS;
	u32 eec;
	u32 i;

	DEBUGFUNC("ixgbe_acquire_eeprom");

	/* First take the SW/FW synchronization semaphore for the EEPROM */
	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)
	    != IXGBE_SUCCESS)
		status = IXGBE_ERR_SWFW_SYNC;

	if (status == IXGBE_SUCCESS) {
		eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));

		/* Request EEPROM Access */
		eec |= IXGBE_EEC_REQ;
		IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);

		/* Poll until hardware grants access */
		for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
			eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
			if (eec & IXGBE_EEC_GNT)
				break;
			usec_delay(5);
		}

		/* Release if grant not acquired */
		if (!(eec & IXGBE_EEC_GNT)) {
			eec &= ~IXGBE_EEC_REQ;
			IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
			DEBUGOUT("Could not acquire EEPROM grant\n");

			/* Also drop the semaphore taken above */
			hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
			status = IXGBE_ERR_EEPROM;
		}

		/* Setup EEPROM for Read/Write */
		if (status == IXGBE_SUCCESS) {
			/* Clear CS and SK */
			eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
			IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
			IXGBE_WRITE_FLUSH(hw);
			usec_delay(1);
		}
	}
	return status;
}
1866 
1867 /**
1868  *  ixgbe_get_eeprom_semaphore - Get hardware semaphore
1869  *  @hw: pointer to hardware structure
1870  *
1871  *  Sets the hardware semaphores so EEPROM access can occur for bit-bang method
1872  **/
static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_ERR_EEPROM;
	u32 timeout = 2000;
	u32 i;
	u32 swsm;

	DEBUGFUNC("ixgbe_get_eeprom_semaphore");


	/* Get SMBI software semaphore between device drivers first */
	for (i = 0; i < timeout; i++) {
		/*
		 * If the SMBI bit is 0 when we read it, then the bit will be
		 * set and we have the semaphore
		 */
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
		if (!(swsm & IXGBE_SWSM_SMBI)) {
			status = IXGBE_SUCCESS;
			break;
		}
		usec_delay(50);
	}

	if (i == timeout) {
		DEBUGOUT("Driver can't access the Eeprom - SMBI Semaphore "
			 "not granted.\n");
		/*
		 * this release is particularly important because our attempts
		 * above to get the semaphore may have succeeded, and if there
		 * was a timeout, we should unconditionally clear the semaphore
		 * bits to free the driver to make progress
		 */
		ixgbe_release_eeprom_semaphore(hw);

		usec_delay(50);
		/*
		 * one last try
		 * If the SMBI bit is 0 when we read it, then the bit will be
		 * set and we have the semaphore
		 */
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
		if (!(swsm & IXGBE_SWSM_SMBI))
			status = IXGBE_SUCCESS;
	}

	/* Now get the semaphore between SW/FW through the SWESMBI bit */
	if (status == IXGBE_SUCCESS) {
		for (i = 0; i < timeout; i++) {
			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));

			/* Set the SW EEPROM semaphore bit to request access */
			swsm |= IXGBE_SWSM_SWESMBI;
			IXGBE_WRITE_REG(hw, IXGBE_SWSM_BY_MAC(hw), swsm);

			/*
			 * If we set the bit successfully then we got the
			 * semaphore.
			 */
			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
			if (swsm & IXGBE_SWSM_SWESMBI)
				break;

			usec_delay(50);
		}

		/*
		 * Release semaphores and return error if SW EEPROM semaphore
		 * was not granted because we don't have access to the EEPROM
		 */
		if (i >= timeout) {
			ERROR_REPORT1(IXGBE_ERROR_POLLING,
			    "SWESMBI Software EEPROM semaphore not granted.\n");
			ixgbe_release_eeprom_semaphore(hw);
			status = IXGBE_ERR_EEPROM;
		}
	} else {
		ERROR_REPORT1(IXGBE_ERROR_POLLING,
			     "Software semaphore SMBI between device drivers "
			     "not granted.\n");
	}

	return status;
}
1957 
1958 /**
1959  *  ixgbe_release_eeprom_semaphore - Release hardware semaphore
1960  *  @hw: pointer to hardware structure
1961  *
1962  *  This function clears hardware semaphore bits.
1963  **/
static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
{
	u32 swsm;

	DEBUGFUNC("ixgbe_release_eeprom_semaphore");

	/* NOTE(review): the acquire path reads IXGBE_SWSM_BY_MAC(hw) but
	 * this release path uses the plain IXGBE_SWSM offset — confirm both
	 * resolve to the same register on all supported MACs. */
	swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);

	/* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
	swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
	IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
	IXGBE_WRITE_FLUSH(hw);
}
1977 
1978 /**
1979  *  ixgbe_ready_eeprom - Polls for EEPROM ready
1980  *  @hw: pointer to hardware structure
1981  **/
1982 static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
1983 {
1984 	s32 status = IXGBE_SUCCESS;
1985 	u16 i;
1986 	u8 spi_stat_reg;
1987 
1988 	DEBUGFUNC("ixgbe_ready_eeprom");
1989 
1990 	/*
1991 	 * Read "Status Register" repeatedly until the LSB is cleared.  The
1992 	 * EEPROM will signal that the command has been completed by clearing
1993 	 * bit 0 of the internal status register.  If it's not cleared within
1994 	 * 5 milliseconds, then error out.
1995 	 */
1996 	for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
1997 		ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
1998 					    IXGBE_EEPROM_OPCODE_BITS);
1999 		spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
2000 		if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
2001 			break;
2002 
2003 		usec_delay(5);
2004 		ixgbe_standby_eeprom(hw);
2005 	}
2006 
2007 	/*
2008 	 * On some parts, SPI write time could vary from 0-20mSec on 3.3V
2009 	 * devices (and only 0-5mSec on 5V devices)
2010 	 */
2011 	if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
2012 		DEBUGOUT("SPI EEPROM Status error\n");
2013 		status = IXGBE_ERR_EEPROM;
2014 	}
2015 
2016 	return status;
2017 }
2018 
2019 /**
2020  *  ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
2021  *  @hw: pointer to hardware structure
2022  **/
2023 static void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
2024 {
2025 	u32 eec;
2026 
2027 	DEBUGFUNC("ixgbe_standby_eeprom");
2028 
2029 	eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
2030 
2031 	/* Toggle CS to flush commands */
2032 	eec |= IXGBE_EEC_CS;
2033 	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
2034 	IXGBE_WRITE_FLUSH(hw);
2035 	usec_delay(1);
2036 	eec &= ~IXGBE_EEC_CS;
2037 	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
2038 	IXGBE_WRITE_FLUSH(hw);
2039 	usec_delay(1);
2040 }
2041 
2042 /**
2043  *  ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
2044  *  @hw: pointer to hardware structure
2045  *  @data: data to send to the EEPROM
2046  *  @count: number of bits to shift out
2047  **/
2048 static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
2049 					u16 count)
2050 {
2051 	u32 eec;
2052 	u32 mask;
2053 	u32 i;
2054 
2055 	DEBUGFUNC("ixgbe_shift_out_eeprom_bits");
2056 
2057 	eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
2058 
2059 	/*
2060 	 * Mask is used to shift "count" bits of "data" out to the EEPROM
2061 	 * one bit at a time.  Determine the starting bit based on count
2062 	 */
2063 	mask = 0x01 << (count - 1);
2064 
2065 	for (i = 0; i < count; i++) {
2066 		/*
2067 		 * A "1" is shifted out to the EEPROM by setting bit "DI" to a
2068 		 * "1", and then raising and then lowering the clock (the SK
2069 		 * bit controls the clock input to the EEPROM).  A "0" is
2070 		 * shifted out to the EEPROM by setting "DI" to "0" and then
2071 		 * raising and then lowering the clock.
2072 		 */
2073 		if (data & mask)
2074 			eec |= IXGBE_EEC_DI;
2075 		else
2076 			eec &= ~IXGBE_EEC_DI;
2077 
2078 		IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
2079 		IXGBE_WRITE_FLUSH(hw);
2080 
2081 		usec_delay(1);
2082 
2083 		ixgbe_raise_eeprom_clk(hw, &eec);
2084 		ixgbe_lower_eeprom_clk(hw, &eec);
2085 
2086 		/*
2087 		 * Shift mask to signify next bit of data to shift in to the
2088 		 * EEPROM
2089 		 */
2090 		mask = mask >> 1;
2091 	}
2092 
2093 	/* We leave the "DI" bit set to "0" when we leave this routine. */
2094 	eec &= ~IXGBE_EEC_DI;
2095 	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
2096 	IXGBE_WRITE_FLUSH(hw);
2097 }
2098 
2099 /**
2100  *  ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
2101  *  @hw: pointer to hardware structure
2102  **/
2103 static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
2104 {
2105 	u32 eec;
2106 	u32 i;
2107 	u16 data = 0;
2108 
2109 	DEBUGFUNC("ixgbe_shift_in_eeprom_bits");
2110 
2111 	/*
2112 	 * In order to read a register from the EEPROM, we need to shift
2113 	 * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
2114 	 * the clock input to the EEPROM (setting the SK bit), and then reading
2115 	 * the value of the "DO" bit.  During this "shifting in" process the
2116 	 * "DI" bit should always be clear.
2117 	 */
2118 	eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
2119 
2120 	eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);
2121 
2122 	for (i = 0; i < count; i++) {
2123 		data = data << 1;
2124 		ixgbe_raise_eeprom_clk(hw, &eec);
2125 
2126 		eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
2127 
2128 		eec &= ~(IXGBE_EEC_DI);
2129 		if (eec & IXGBE_EEC_DO)
2130 			data |= 1;
2131 
2132 		ixgbe_lower_eeprom_clk(hw, &eec);
2133 	}
2134 
2135 	return data;
2136 }
2137 
2138 /**
2139  *  ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
2140  *  @hw: pointer to hardware structure
2141  *  @eec: EEC register's current value
2142  **/
2143 static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
2144 {
2145 	DEBUGFUNC("ixgbe_raise_eeprom_clk");
2146 
2147 	/*
2148 	 * Raise the clock input to the EEPROM
2149 	 * (setting the SK bit), then delay
2150 	 */
2151 	*eec = *eec | IXGBE_EEC_SK;
2152 	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), *eec);
2153 	IXGBE_WRITE_FLUSH(hw);
2154 	usec_delay(1);
2155 }
2156 
2157 /**
2158  *  ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
2159  *  @hw: pointer to hardware structure
2160  *  @eecd: EECD's current value
2161  **/
2162 static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
2163 {
2164 	DEBUGFUNC("ixgbe_lower_eeprom_clk");
2165 
2166 	/*
2167 	 * Lower the clock input to the EEPROM (clearing the SK bit), then
2168 	 * delay
2169 	 */
2170 	*eec = *eec & ~IXGBE_EEC_SK;
2171 	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), *eec);
2172 	IXGBE_WRITE_FLUSH(hw);
2173 	usec_delay(1);
2174 }
2175 
2176 /**
2177  *  ixgbe_release_eeprom - Release EEPROM, release semaphores
2178  *  @hw: pointer to hardware structure
2179  **/
2180 static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
2181 {
2182 	u32 eec;
2183 
2184 	DEBUGFUNC("ixgbe_release_eeprom");
2185 
2186 	eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
2187 
2188 	eec |= IXGBE_EEC_CS;  /* Pull CS high */
2189 	eec &= ~IXGBE_EEC_SK; /* Lower SCK */
2190 
2191 	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
2192 	IXGBE_WRITE_FLUSH(hw);
2193 
2194 	usec_delay(1);
2195 
2196 	/* Stop requesting EEPROM access */
2197 	eec &= ~IXGBE_EEC_REQ;
2198 	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
2199 
2200 	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
2201 
2202 	/* Delay before attempt to obtain semaphore again to allow FW access */
2203 	msec_delay(hw->eeprom.semaphore_delay);
2204 }
2205 
2206 /**
2207  *  ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
2208  *  @hw: pointer to hardware structure
2209  *
2210  *  Returns a negative error code on error, or the 16-bit checksum
2211  **/
2212 s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
2213 {
2214 	u16 i;
2215 	u16 j;
2216 	u16 checksum = 0;
2217 	u16 length = 0;
2218 	u16 pointer = 0;
2219 	u16 word = 0;
2220 
2221 	DEBUGFUNC("ixgbe_calc_eeprom_checksum_generic");
2222 
2223 	/* Include 0x0-0x3F in the checksum */
2224 	for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
2225 		if (hw->eeprom.ops.read(hw, i, &word)) {
2226 			DEBUGOUT("EEPROM read failed\n");
2227 			return IXGBE_ERR_EEPROM;
2228 		}
2229 		checksum += word;
2230 	}
2231 
2232 	/* Include all data from pointers except for the fw pointer */
2233 	for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
2234 		if (hw->eeprom.ops.read(hw, i, &pointer)) {
2235 			DEBUGOUT("EEPROM read failed\n");
2236 			return IXGBE_ERR_EEPROM;
2237 		}
2238 
2239 		/* If the pointer seems invalid */
2240 		if (pointer == 0xFFFF || pointer == 0)
2241 			continue;
2242 
2243 		if (hw->eeprom.ops.read(hw, pointer, &length)) {
2244 			DEBUGOUT("EEPROM read failed\n");
2245 			return IXGBE_ERR_EEPROM;
2246 		}
2247 
2248 		if (length == 0xFFFF || length == 0)
2249 			continue;
2250 
2251 		for (j = pointer + 1; j <= pointer + length; j++) {
2252 			if (hw->eeprom.ops.read(hw, j, &word)) {
2253 				DEBUGOUT("EEPROM read failed\n");
2254 				return IXGBE_ERR_EEPROM;
2255 			}
2256 			checksum += word;
2257 		}
2258 	}
2259 
2260 	checksum = (u16)IXGBE_EEPROM_SUM - checksum;
2261 
2262 	return (s32)checksum;
2263 }
2264 
2265 /**
2266  *  ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
2267  *  @hw: pointer to hardware structure
2268  *  @checksum_val: calculated checksum
2269  *
2270  *  Performs checksum calculation and validates the EEPROM checksum.  If the
2271  *  caller does not need checksum_val, the value can be NULL.
2272  **/
2273 s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
2274 					   u16 *checksum_val)
2275 {
2276 	s32 status;
2277 	u16 checksum;
2278 	u16 read_checksum = 0;
2279 
2280 	DEBUGFUNC("ixgbe_validate_eeprom_checksum_generic");
2281 
2282 	/* Read the first word from the EEPROM. If this times out or fails, do
2283 	 * not continue or we could be in for a very long wait while every
2284 	 * EEPROM read fails
2285 	 */
2286 	status = hw->eeprom.ops.read(hw, 0, &checksum);
2287 	if (status) {
2288 		DEBUGOUT("EEPROM read failed\n");
2289 		return status;
2290 	}
2291 
2292 	status = hw->eeprom.ops.calc_checksum(hw);
2293 	if (status < 0)
2294 		return status;
2295 
2296 	checksum = (u16)(status & 0xffff);
2297 
2298 	status = hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
2299 	if (status) {
2300 		DEBUGOUT("EEPROM read failed\n");
2301 		return status;
2302 	}
2303 
2304 	/* Verify read checksum from EEPROM is the same as
2305 	 * calculated checksum
2306 	 */
2307 	if (read_checksum != checksum)
2308 		status = IXGBE_ERR_EEPROM_CHECKSUM;
2309 
2310 	/* If the user cares, return the calculated checksum */
2311 	if (checksum_val)
2312 		*checksum_val = checksum;
2313 
2314 	return status;
2315 }
2316 
2317 /**
2318  *  ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
2319  *  @hw: pointer to hardware structure
2320  **/
2321 s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
2322 {
2323 	s32 status;
2324 	u16 checksum;
2325 
2326 	DEBUGFUNC("ixgbe_update_eeprom_checksum_generic");
2327 
2328 	/* Read the first word from the EEPROM. If this times out or fails, do
2329 	 * not continue or we could be in for a very long wait while every
2330 	 * EEPROM read fails
2331 	 */
2332 	status = hw->eeprom.ops.read(hw, 0, &checksum);
2333 	if (status) {
2334 		DEBUGOUT("EEPROM read failed\n");
2335 		return status;
2336 	}
2337 
2338 	status = hw->eeprom.ops.calc_checksum(hw);
2339 	if (status < 0)
2340 		return status;
2341 
2342 	checksum = (u16)(status & 0xffff);
2343 
2344 	status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, checksum);
2345 
2346 	return status;
2347 }
2348 
2349 /**
2350  *  ixgbe_validate_mac_addr - Validate MAC address
2351  *  @mac_addr: pointer to MAC address.
2352  *
2353  *  Tests a MAC address to ensure it is a valid Individual Address.
2354  **/
2355 s32 ixgbe_validate_mac_addr(u8 *mac_addr)
2356 {
2357 	s32 status = IXGBE_SUCCESS;
2358 
2359 	DEBUGFUNC("ixgbe_validate_mac_addr");
2360 
2361 	/* Make sure it is not a multicast address */
2362 	if (IXGBE_IS_MULTICAST(mac_addr)) {
2363 		status = IXGBE_ERR_INVALID_MAC_ADDR;
2364 	/* Not a broadcast address */
2365 	} else if (IXGBE_IS_BROADCAST(mac_addr)) {
2366 		status = IXGBE_ERR_INVALID_MAC_ADDR;
2367 	/* Reject the zero address */
2368 	} else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
2369 		   mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
2370 		status = IXGBE_ERR_INVALID_MAC_ADDR;
2371 	}
2372 	return status;
2373 }
2374 
2375 /**
2376  *  ixgbe_set_rar_generic - Set Rx address register
2377  *  @hw: pointer to hardware structure
2378  *  @index: Receive address register to write
2379  *  @addr: Address to put into receive address register
2380  *  @vmdq: VMDq "set" or "pool" index
2381  *  @enable_addr: set flag that address is active
2382  *
2383  *  Puts an ethernet address into a receive address register.
2384  **/
2385 s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
2386 			  u32 enable_addr)
2387 {
2388 	u32 rar_low, rar_high;
2389 	u32 rar_entries = hw->mac.num_rar_entries;
2390 
2391 	DEBUGFUNC("ixgbe_set_rar_generic");
2392 
2393 	/* Make sure we are using a valid rar index range */
2394 	if (index >= rar_entries) {
2395 		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
2396 			     "RAR index %d is out of range.\n", index);
2397 		return IXGBE_ERR_INVALID_ARGUMENT;
2398 	}
2399 
2400 	/* setup VMDq pool selection before this RAR gets enabled */
2401 	hw->mac.ops.set_vmdq(hw, index, vmdq);
2402 
2403 	/*
2404 	 * HW expects these in little endian so we reverse the byte
2405 	 * order from network order (big endian) to little endian
2406 	 */
2407 	rar_low = ((u32)addr[0] |
2408 		   ((u32)addr[1] << 8) |
2409 		   ((u32)addr[2] << 16) |
2410 		   ((u32)addr[3] << 24));
2411 	/*
2412 	 * Some parts put the VMDq setting in the extra RAH bits,
2413 	 * so save everything except the lower 16 bits that hold part
2414 	 * of the address and the address valid bit.
2415 	 */
2416 	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
2417 	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
2418 	rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
2419 
2420 	if (enable_addr != 0)
2421 		rar_high |= IXGBE_RAH_AV;
2422 
2423 	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
2424 	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
2425 
2426 	return IXGBE_SUCCESS;
2427 }
2428 
2429 /**
2430  *  ixgbe_clear_rar_generic - Remove Rx address register
2431  *  @hw: pointer to hardware structure
2432  *  @index: Receive address register to write
2433  *
2434  *  Clears an ethernet address from a receive address register.
2435  **/
2436 s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
2437 {
2438 	u32 rar_high;
2439 	u32 rar_entries = hw->mac.num_rar_entries;
2440 
2441 	DEBUGFUNC("ixgbe_clear_rar_generic");
2442 
2443 	/* Make sure we are using a valid rar index range */
2444 	if (index >= rar_entries) {
2445 		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
2446 			     "RAR index %d is out of range.\n", index);
2447 		return IXGBE_ERR_INVALID_ARGUMENT;
2448 	}
2449 
2450 	/*
2451 	 * Some parts put the VMDq setting in the extra RAH bits,
2452 	 * so save everything except the lower 16 bits that hold part
2453 	 * of the address and the address valid bit.
2454 	 */
2455 	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
2456 	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
2457 
2458 	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
2459 	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
2460 
2461 	/* clear VMDq pool/queue selection for this RAR */
2462 	hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
2463 
2464 	return IXGBE_SUCCESS;
2465 }
2466 
2467 /**
2468  *  ixgbe_init_rx_addrs_generic - Initializes receive address filters.
2469  *  @hw: pointer to hardware structure
2470  *
2471  *  Places the MAC address in receive address register 0 and clears the rest
2472  *  of the receive address registers. Clears the multicast table. Assumes
2473  *  the receiver is in reset when the routine is called.
2474  **/
2475 s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
2476 {
2477 	u32 i;
2478 	u32 rar_entries = hw->mac.num_rar_entries;
2479 
2480 	DEBUGFUNC("ixgbe_init_rx_addrs_generic");
2481 
2482 	/*
2483 	 * If the current mac address is valid, assume it is a software override
2484 	 * to the permanent address.
2485 	 * Otherwise, use the permanent address from the eeprom.
2486 	 */
2487 	if (ixgbe_validate_mac_addr(hw->mac.addr) ==
2488 	    IXGBE_ERR_INVALID_MAC_ADDR) {
2489 		/* Get the MAC address from the RAR0 for later reference */
2490 		hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
2491 
2492 		DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
2493 			  hw->mac.addr[0], hw->mac.addr[1],
2494 			  hw->mac.addr[2]);
2495 		DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
2496 			  hw->mac.addr[4], hw->mac.addr[5]);
2497 	} else {
2498 		/* Setup the receive address. */
2499 		DEBUGOUT("Overriding MAC Address in RAR[0]\n");
2500 		DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ",
2501 			  hw->mac.addr[0], hw->mac.addr[1],
2502 			  hw->mac.addr[2]);
2503 		DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
2504 			  hw->mac.addr[4], hw->mac.addr[5]);
2505 
2506 		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
2507 	}
2508 
2509 	/* clear VMDq pool/queue selection for RAR 0 */
2510 	hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
2511 
2512 	hw->addr_ctrl.overflow_promisc = 0;
2513 
2514 	hw->addr_ctrl.rar_used_count = 1;
2515 
2516 	/* Zero out the other receive addresses. */
2517 	DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1);
2518 	for (i = 1; i < rar_entries; i++) {
2519 		IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
2520 		IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
2521 	}
2522 
2523 	/* Clear the MTA */
2524 	hw->addr_ctrl.mta_in_use = 0;
2525 	IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
2526 
2527 	DEBUGOUT(" Clearing MTA\n");
2528 	for (i = 0; i < hw->mac.mcft_size; i++)
2529 		IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
2530 
2531 	ixgbe_init_uta_tables(hw);
2532 
2533 	return IXGBE_SUCCESS;
2534 }
2535 
2536 /**
2537  *  ixgbe_add_uc_addr - Adds a secondary unicast address.
2538  *  @hw: pointer to hardware structure
2539  *  @addr: new address
2540  *
2541  *  Adds it to unused receive address register or goes into promiscuous mode.
2542  **/
2543 void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
2544 {
2545 	u32 rar_entries = hw->mac.num_rar_entries;
2546 	u32 rar;
2547 
2548 	DEBUGFUNC("ixgbe_add_uc_addr");
2549 
2550 	DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
2551 		  addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
2552 
2553 	/*
2554 	 * Place this address in the RAR if there is room,
2555 	 * else put the controller into promiscuous mode
2556 	 */
2557 	if (hw->addr_ctrl.rar_used_count < rar_entries) {
2558 		rar = hw->addr_ctrl.rar_used_count;
2559 		hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
2560 		DEBUGOUT1("Added a secondary address to RAR[%d]\n", rar);
2561 		hw->addr_ctrl.rar_used_count++;
2562 	} else {
2563 		hw->addr_ctrl.overflow_promisc++;
2564 	}
2565 
2566 	DEBUGOUT("ixgbe_add_uc_addr Complete\n");
2567 }
2568 
2569 /**
2570  *  ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
2571  *  @hw: pointer to hardware structure
2572  *  @addr_list: the list of new addresses
2573  *  @addr_count: number of addresses
2574  *  @next: iterator function to walk the address list
2575  *
2576  *  The given list replaces any existing list.  Clears the secondary addrs from
2577  *  receive address registers.  Uses unused receive address registers for the
2578  *  first secondary addresses, and falls back to promiscuous mode as needed.
2579  *
2580  *  Drivers using secondary unicast addresses must set user_set_promisc when
2581  *  manually putting the device into promiscuous mode.
2582  **/
2583 s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
2584 				      u32 addr_count, ixgbe_mc_addr_itr next)
2585 {
2586 	u8 *addr;
2587 	u32 i;
2588 	u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
2589 	u32 uc_addr_in_use;
2590 	u32 fctrl;
2591 	u32 vmdq;
2592 
2593 	DEBUGFUNC("ixgbe_update_uc_addr_list_generic");
2594 
2595 	/*
2596 	 * Clear accounting of old secondary address list,
2597 	 * don't count RAR[0]
2598 	 */
2599 	uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
2600 	hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
2601 	hw->addr_ctrl.overflow_promisc = 0;
2602 
2603 	/* Zero out the other receive addresses */
2604 	DEBUGOUT1("Clearing RAR[1-%d]\n", uc_addr_in_use+1);
2605 	for (i = 0; i < uc_addr_in_use; i++) {
2606 		IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0);
2607 		IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0);
2608 	}
2609 
2610 	/* Add the new addresses */
2611 	for (i = 0; i < addr_count; i++) {
2612 		DEBUGOUT(" Adding the secondary addresses:\n");
2613 		addr = next(hw, &addr_list, &vmdq);
2614 		ixgbe_add_uc_addr(hw, addr, vmdq);
2615 	}
2616 
2617 	if (hw->addr_ctrl.overflow_promisc) {
2618 		/* enable promisc if not already in overflow or set by user */
2619 		if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
2620 			DEBUGOUT(" Entering address overflow promisc mode\n");
2621 			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2622 			fctrl |= IXGBE_FCTRL_UPE;
2623 			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2624 		}
2625 	} else {
2626 		/* only disable if set by overflow, not by user */
2627 		if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
2628 			DEBUGOUT(" Leaving address overflow promisc mode\n");
2629 			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2630 			fctrl &= ~IXGBE_FCTRL_UPE;
2631 			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2632 		}
2633 	}
2634 
2635 	DEBUGOUT("ixgbe_update_uc_addr_list_generic Complete\n");
2636 	return IXGBE_SUCCESS;
2637 }
2638 
2639 /**
2640  *  ixgbe_mta_vector - Determines bit-vector in multicast table to set
2641  *  @hw: pointer to hardware structure
2642  *  @mc_addr: the multicast address
2643  *
2644  *  Extracts the 12 bits, from a multicast address, to determine which
2645  *  bit-vector to set in the multicast table. The hardware uses 12 bits, from
2646  *  incoming rx multicast addresses, to determine the bit-vector to check in
2647  *  the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
2648  *  by the MO field of the MCSTCTRL. The MO field is set during initialization
2649  *  to mc_filter_type.
2650  **/
2651 static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
2652 {
2653 	u32 vector = 0;
2654 
2655 	DEBUGFUNC("ixgbe_mta_vector");
2656 
2657 	switch (hw->mac.mc_filter_type) {
2658 	case 0:   /* use bits [47:36] of the address */
2659 		vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
2660 		break;
2661 	case 1:   /* use bits [46:35] of the address */
2662 		vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
2663 		break;
2664 	case 2:   /* use bits [45:34] of the address */
2665 		vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
2666 		break;
2667 	case 3:   /* use bits [43:32] of the address */
2668 		vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
2669 		break;
2670 	default:  /* Invalid mc_filter_type */
2671 		DEBUGOUT("MC filter type param set incorrectly\n");
2672 		ASSERT(0);
2673 		break;
2674 	}
2675 
2676 	/* vector can only be 12-bits or boundary will be exceeded */
2677 	vector &= 0xFFF;
2678 	return vector;
2679 }
2680 
2681 /**
2682  *  ixgbe_set_mta - Set bit-vector in multicast table
2683  *  @hw: pointer to hardware structure
2684  *  @hash_value: Multicast address hash value
2685  *
2686  *  Sets the bit-vector in the multicast table.
2687  **/
2688 void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
2689 {
2690 	u32 vector;
2691 	u32 vector_bit;
2692 	u32 vector_reg;
2693 
2694 	DEBUGFUNC("ixgbe_set_mta");
2695 
2696 	hw->addr_ctrl.mta_in_use++;
2697 
2698 	vector = ixgbe_mta_vector(hw, mc_addr);
2699 	DEBUGOUT1(" bit-vector = 0x%03X\n", vector);
2700 
2701 	/*
2702 	 * The MTA is a register array of 128 32-bit registers. It is treated
2703 	 * like an array of 4096 bits.  We want to set bit
2704 	 * BitArray[vector_value]. So we figure out what register the bit is
2705 	 * in, read it, OR in the new bit, then write back the new value.  The
2706 	 * register is determined by the upper 7 bits of the vector value and
2707 	 * the bit within that register are determined by the lower 5 bits of
2708 	 * the value.
2709 	 */
2710 	vector_reg = (vector >> 5) & 0x7F;
2711 	vector_bit = vector & 0x1F;
2712 	hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
2713 }
2714 
2715 /**
2716  *  ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
2717  *  @hw: pointer to hardware structure
2718  *  @mc_addr_list: the list of new multicast addresses
2719  *  @mc_addr_count: number of addresses
2720  *  @next: iterator function to walk the multicast address list
2721  *  @clear: flag, when set clears the table beforehand
2722  *
2723  *  When the clear flag is set, the given list replaces any existing list.
2724  *  Hashes the given addresses into the multicast table.
2725  **/
2726 s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
2727 				      u32 mc_addr_count, ixgbe_mc_addr_itr next,
2728 				      bool clear)
2729 {
2730 	u32 i;
2731 	u32 vmdq;
2732 
2733 	DEBUGFUNC("ixgbe_update_mc_addr_list_generic");
2734 
2735 	/*
2736 	 * Set the new number of MC addresses that we are being requested to
2737 	 * use.
2738 	 */
2739 	hw->addr_ctrl.num_mc_addrs = mc_addr_count;
2740 	hw->addr_ctrl.mta_in_use = 0;
2741 
2742 	/* Clear mta_shadow */
2743 	if (clear) {
2744 		DEBUGOUT(" Clearing MTA\n");
2745 		memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
2746 	}
2747 
2748 	/* Update mta_shadow */
2749 	for (i = 0; i < mc_addr_count; i++) {
2750 		DEBUGOUT(" Adding the multicast addresses:\n");
2751 		ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
2752 	}
2753 
2754 	/* Enable mta */
2755 	for (i = 0; i < hw->mac.mcft_size; i++)
2756 		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
2757 				      hw->mac.mta_shadow[i]);
2758 
2759 	if (hw->addr_ctrl.mta_in_use > 0)
2760 		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
2761 				IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
2762 
2763 	DEBUGOUT("ixgbe_update_mc_addr_list_generic Complete\n");
2764 	return IXGBE_SUCCESS;
2765 }
2766 
2767 /**
2768  *  ixgbe_enable_mc_generic - Enable multicast address in RAR
2769  *  @hw: pointer to hardware structure
2770  *
2771  *  Enables multicast address in RAR and the use of the multicast hash table.
2772  **/
2773 s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
2774 {
2775 	struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2776 
2777 	DEBUGFUNC("ixgbe_enable_mc_generic");
2778 
2779 	if (a->mta_in_use > 0)
2780 		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
2781 				hw->mac.mc_filter_type);
2782 
2783 	return IXGBE_SUCCESS;
2784 }
2785 
2786 /**
2787  *  ixgbe_disable_mc_generic - Disable multicast address in RAR
2788  *  @hw: pointer to hardware structure
2789  *
2790  *  Disables multicast address in RAR and the use of the multicast hash table.
2791  **/
2792 s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
2793 {
2794 	struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2795 
2796 	DEBUGFUNC("ixgbe_disable_mc_generic");
2797 
2798 	if (a->mta_in_use > 0)
2799 		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
2800 
2801 	return IXGBE_SUCCESS;
2802 }
2803 
2804 /**
2805  *  ixgbe_fc_enable_generic - Enable flow control
2806  *  @hw: pointer to hardware structure
2807  *
2808  *  Enable flow control according to the current settings.
2809  **/
2810 s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
2811 {
2812 	s32 ret_val = IXGBE_SUCCESS;
2813 	u32 mflcn_reg, fccfg_reg;
2814 	u32 reg;
2815 	u32 fcrtl, fcrth;
2816 	int i;
2817 
2818 	DEBUGFUNC("ixgbe_fc_enable_generic");
2819 
2820 	/* Validate the water mark configuration */
2821 	if (!hw->fc.pause_time) {
2822 		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2823 		goto out;
2824 	}
2825 
2826 	/* Low water mark of zero causes XOFF floods */
2827 	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
2828 		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
2829 		    hw->fc.high_water[i]) {
2830 			if (!hw->fc.low_water[i] ||
2831 			    hw->fc.low_water[i] >= hw->fc.high_water[i]) {
2832 				DEBUGOUT("Invalid water mark configuration\n");
2833 				ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2834 				goto out;
2835 			}
2836 		}
2837 	}
2838 
2839 	/* Negotiate the fc mode to use */
2840 	hw->mac.ops.fc_autoneg(hw);
2841 
2842 	/* Disable any previous flow control settings */
2843 	mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
2844 	mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);
2845 
2846 	fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
2847 	fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
2848 
2849 	/*
2850 	 * The possible values of fc.current_mode are:
2851 	 * 0: Flow control is completely disabled
2852 	 * 1: Rx flow control is enabled (we can receive pause frames,
2853 	 *    but not send pause frames).
2854 	 * 2: Tx flow control is enabled (we can send pause frames but
2855 	 *    we do not support receiving pause frames).
2856 	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
2857 	 * other: Invalid.
2858 	 */
2859 	switch (hw->fc.current_mode) {
2860 	case ixgbe_fc_none:
2861 		/*
2862 		 * Flow control is disabled by software override or autoneg.
2863 		 * The code below will actually disable it in the HW.
2864 		 */
2865 		break;
2866 	case ixgbe_fc_rx_pause:
2867 		/*
2868 		 * Rx Flow control is enabled and Tx Flow control is
2869 		 * disabled by software override. Since there really
2870 		 * isn't a way to advertise that we are capable of RX
2871 		 * Pause ONLY, we will advertise that we support both
2872 		 * symmetric and asymmetric Rx PAUSE.  Later, we will
2873 		 * disable the adapter's ability to send PAUSE frames.
2874 		 */
2875 		mflcn_reg |= IXGBE_MFLCN_RFCE;
2876 		break;
2877 	case ixgbe_fc_tx_pause:
2878 		/*
2879 		 * Tx Flow control is enabled, and Rx Flow control is
2880 		 * disabled by software override.
2881 		 */
2882 		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
2883 		break;
2884 	case ixgbe_fc_full:
2885 		/* Flow control (both Rx and Tx) is enabled by SW override. */
2886 		mflcn_reg |= IXGBE_MFLCN_RFCE;
2887 		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
2888 		break;
2889 	default:
2890 		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
2891 			     "Flow control param set incorrectly\n");
2892 		ret_val = IXGBE_ERR_CONFIG;
2893 		goto out;
2894 		break;
2895 	}
2896 
2897 	/* Set 802.3x based flow control settings. */
2898 	mflcn_reg |= IXGBE_MFLCN_DPF;
2899 	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
2900 	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
2901 
2902 
2903 	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
2904 	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
2905 		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
2906 		    hw->fc.high_water[i]) {
2907 			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
2908 			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
2909 			fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
2910 		} else {
2911 			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
2912 			/*
2913 			 * In order to prevent Tx hangs when the internal Tx
2914 			 * switch is enabled we must set the high water mark
2915 			 * to the Rx packet buffer size - 24KB.  This allows
2916 			 * the Tx switch to function even under heavy Rx
2917 			 * workloads.
2918 			 */
2919 			fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576;
2920 		}
2921 
2922 		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
2923 	}
2924 
2925 	/* Configure pause time (2 TCs per register) */
2926 	reg = hw->fc.pause_time * 0x00010001;
2927 	for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
2928 		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
2929 
2930 	/* Configure flow control refresh threshold value */
2931 	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
2932 
2933 out:
2934 	return ret_val;
2935 }
2936 
2937 /**
2938  *  ixgbe_negotiate_fc - Negotiate flow control
2939  *  @hw: pointer to hardware structure
2940  *  @adv_reg: flow control advertised settings
2941  *  @lp_reg: link partner's flow control settings
2942  *  @adv_sym: symmetric pause bit in advertisement
2943  *  @adv_asm: asymmetric pause bit in advertisement
2944  *  @lp_sym: symmetric pause bit in link partner advertisement
2945  *  @lp_asm: asymmetric pause bit in link partner advertisement
2946  *
2947  *  Find the intersection between advertised settings and link partner's
2948  *  advertised settings
2949  **/
2950 s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
2951 		       u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
2952 {
2953 	if ((!(adv_reg)) ||  (!(lp_reg))) {
2954 		ERROR_REPORT3(IXGBE_ERROR_UNSUPPORTED,
2955 			     "Local or link partner's advertised flow control "
2956 			     "settings are NULL. Local: %x, link partner: %x\n",
2957 			     adv_reg, lp_reg);
2958 		return IXGBE_ERR_FC_NOT_NEGOTIATED;
2959 	}
2960 
2961 	if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
2962 		/*
2963 		 * Now we need to check if the user selected Rx ONLY
2964 		 * of pause frames.  In this case, we had to advertise
2965 		 * FULL flow control because we could not advertise RX
2966 		 * ONLY. Hence, we must now check to see if we need to
2967 		 * turn OFF the TRANSMISSION of PAUSE frames.
2968 		 */
2969 		if (hw->fc.requested_mode == ixgbe_fc_full) {
2970 			hw->fc.current_mode = ixgbe_fc_full;
2971 			DEBUGOUT("Flow Control = FULL.\n");
2972 		} else {
2973 			hw->fc.current_mode = ixgbe_fc_rx_pause;
2974 			DEBUGOUT("Flow Control=RX PAUSE frames only\n");
2975 		}
2976 	} else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2977 		   (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2978 		hw->fc.current_mode = ixgbe_fc_tx_pause;
2979 		DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
2980 	} else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2981 		   !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2982 		hw->fc.current_mode = ixgbe_fc_rx_pause;
2983 		DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
2984 	} else {
2985 		hw->fc.current_mode = ixgbe_fc_none;
2986 		DEBUGOUT("Flow Control = NONE.\n");
2987 	}
2988 	return IXGBE_SUCCESS;
2989 }
2990 
2991 /**
2992  *  ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
2993  *  @hw: pointer to hardware structure
2994  *
2995  *  Enable flow control according on 1 gig fiber.
2996  **/
2997 static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
2998 {
2999 	u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
3000 	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
3001 
3002 	/*
3003 	 * On multispeed fiber at 1g, bail out if
3004 	 * - link is up but AN did not complete, or if
3005 	 * - link is up and AN completed but timed out
3006 	 */
3007 
3008 	linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
3009 	if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
3010 	    (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
3011 		DEBUGOUT("Auto-Negotiation did not complete or timed out\n");
3012 		goto out;
3013 	}
3014 
3015 	pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
3016 	pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
3017 
3018 	ret_val =  ixgbe_negotiate_fc(hw, pcs_anadv_reg,
3019 				      pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
3020 				      IXGBE_PCS1GANA_ASM_PAUSE,
3021 				      IXGBE_PCS1GANA_SYM_PAUSE,
3022 				      IXGBE_PCS1GANA_ASM_PAUSE);
3023 
3024 out:
3025 	return ret_val;
3026 }
3027 
3028 /**
3029  *  ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
3030  *  @hw: pointer to hardware structure
3031  *
3032  *  Enable flow control according to IEEE clause 37.
3033  **/
3034 static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
3035 {
3036 	u32 links2, anlp1_reg, autoc_reg, links;
3037 	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
3038 
3039 	/*
3040 	 * On backplane, bail out if
3041 	 * - backplane autoneg was not completed, or if
3042 	 * - we are 82599 and link partner is not AN enabled
3043 	 */
3044 	links = IXGBE_READ_REG(hw, IXGBE_LINKS);
3045 	if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
3046 		DEBUGOUT("Auto-Negotiation did not complete\n");
3047 		goto out;
3048 	}
3049 
3050 	if (hw->mac.type == ixgbe_mac_82599EB) {
3051 		links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
3052 		if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
3053 			DEBUGOUT("Link partner is not AN enabled\n");
3054 			goto out;
3055 		}
3056 	}
3057 	/*
3058 	 * Read the 10g AN autoc and LP ability registers and resolve
3059 	 * local flow control settings accordingly
3060 	 */
3061 	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
3062 	anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
3063 
3064 	ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
3065 		anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
3066 		IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);
3067 
3068 out:
3069 	return ret_val;
3070 }
3071 
3072 /**
3073  *  ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
3074  *  @hw: pointer to hardware structure
3075  *
3076  *  Enable flow control according to IEEE clause 37.
3077  **/
3078 static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
3079 {
3080 	u16 technology_ability_reg = 0;
3081 	u16 lp_technology_ability_reg = 0;
3082 
3083 	hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
3084 			     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3085 			     &technology_ability_reg);
3086 	hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP,
3087 			     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3088 			     &lp_technology_ability_reg);
3089 
3090 	return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
3091 				  (u32)lp_technology_ability_reg,
3092 				  IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
3093 				  IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
3094 }
3095 
3096 /**
3097  *  ixgbe_fc_autoneg - Configure flow control
3098  *  @hw: pointer to hardware structure
3099  *
3100  *  Compares our advertised flow control capabilities to those advertised by
3101  *  our link partner, and determines the proper flow control mode to use.
3102  **/
3103 void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
3104 {
3105 	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
3106 	ixgbe_link_speed speed;
3107 	bool link_up;
3108 
3109 	DEBUGFUNC("ixgbe_fc_autoneg");
3110 
3111 	/*
3112 	 * AN should have completed when the cable was plugged in.
3113 	 * Look for reasons to bail out.  Bail out if:
3114 	 * - FC autoneg is disabled, or if
3115 	 * - link is not up.
3116 	 */
3117 	if (hw->fc.disable_fc_autoneg) {
3118 		ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
3119 			     "Flow control autoneg is disabled");
3120 		goto out;
3121 	}
3122 
3123 	hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
3124 	if (!link_up) {
3125 		ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
3126 		goto out;
3127 	}
3128 
3129 	switch (hw->phy.media_type) {
3130 	/* Autoneg flow control on fiber adapters */
3131 	case ixgbe_media_type_fiber_fixed:
3132 	case ixgbe_media_type_fiber_qsfp:
3133 	case ixgbe_media_type_fiber:
3134 		if (speed == IXGBE_LINK_SPEED_1GB_FULL)
3135 			ret_val = ixgbe_fc_autoneg_fiber(hw);
3136 		break;
3137 
3138 	/* Autoneg flow control on backplane adapters */
3139 	case ixgbe_media_type_backplane:
3140 		ret_val = ixgbe_fc_autoneg_backplane(hw);
3141 		break;
3142 
3143 	/* Autoneg flow control on copper adapters */
3144 	case ixgbe_media_type_copper:
3145 		if (ixgbe_device_supports_autoneg_fc(hw))
3146 			ret_val = ixgbe_fc_autoneg_copper(hw);
3147 		break;
3148 
3149 	default:
3150 		break;
3151 	}
3152 
3153 out:
3154 	if (ret_val == IXGBE_SUCCESS) {
3155 		hw->fc.fc_was_autonegged = TRUE;
3156 	} else {
3157 		hw->fc.fc_was_autonegged = FALSE;
3158 		hw->fc.current_mode = hw->fc.requested_mode;
3159 	}
3160 }
3161 
3162 /*
3163  * ixgbe_pcie_timeout_poll - Return number of times to poll for completion
3164  * @hw: pointer to hardware structure
3165  *
3166  * System-wide timeout range is encoded in PCIe Device Control2 register.
3167  *
3168  * Add 10% to specified maximum and return the number of times to poll for
3169  * completion timeout, in units of 100 microsec.  Never return less than
3170  * 800 = 80 millisec.
3171  */
3172 static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw)
3173 {
3174 	s16 devctl2;
3175 	u32 pollcnt;
3176 
3177 	devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
3178 	devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK;
3179 
3180 	switch (devctl2) {
3181 	case IXGBE_PCIDEVCTRL2_65_130ms:
3182 		pollcnt = 1300;		/* 130 millisec */
3183 		break;
3184 	case IXGBE_PCIDEVCTRL2_260_520ms:
3185 		pollcnt = 5200;		/* 520 millisec */
3186 		break;
3187 	case IXGBE_PCIDEVCTRL2_1_2s:
3188 		pollcnt = 20000;	/* 2 sec */
3189 		break;
3190 	case IXGBE_PCIDEVCTRL2_4_8s:
3191 		pollcnt = 80000;	/* 8 sec */
3192 		break;
3193 	case IXGBE_PCIDEVCTRL2_17_34s:
3194 		pollcnt = 34000;	/* 34 sec */
3195 		break;
3196 	case IXGBE_PCIDEVCTRL2_50_100us:	/* 100 microsecs */
3197 	case IXGBE_PCIDEVCTRL2_1_2ms:		/* 2 millisecs */
3198 	case IXGBE_PCIDEVCTRL2_16_32ms:		/* 32 millisec */
3199 	case IXGBE_PCIDEVCTRL2_16_32ms_def:	/* 32 millisec default */
3200 	default:
3201 		pollcnt = 800;		/* 80 millisec minimum */
3202 		break;
3203 	}
3204 
3205 	/* add 10% to spec maximum */
3206 	return (pollcnt * 11) / 10;
3207 }
3208 
/**
 *  ixgbe_disable_pcie_master - Disable PCI-express master access
 *  @hw: pointer to hardware structure
 *
 *  Disables PCI-Express master access and verifies there are no pending
 *  requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable
 *  bit hasn't caused the master requests to be disabled, else IXGBE_SUCCESS
 *  is returned signifying master requests disabled.
 **/
s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_SUCCESS;
	u32 i, poll;
	u16 value;

	DEBUGFUNC("ixgbe_disable_pcie_master");

	/* Always set this bit to ensure any future transactions are blocked */
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);

	/* Exit if master requests are blocked */
	if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) ||
	    IXGBE_REMOVED(hw->hw_addr))
		goto out;

	/* Poll for master request bit to clear (100us per iteration) */
	for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
		usec_delay(100);
		if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
			goto out;
	}

	/*
	 * Two consecutive resets are required via CTRL.RST per datasheet
	 * 5.2.5.3.2 Master Disable.  We set a flag to inform the reset routine
	 * of this need.  The first reset prevents new master requests from
	 * being issued by our device.  We then must wait 1usec or more for any
	 * remaining completions from the PCIe bus to trickle in, and then reset
	 * again to clear out any effects they may have had on our device.
	 */
	DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n");
	hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;

	/* NOTE(review): X550 and newer skip the PCIe pending-transaction
	 * poll below — presumably the double reset is sufficient there;
	 * confirm against the X550 datasheet. */
	if (hw->mac.type >= ixgbe_mac_X550)
		goto out;

	/*
	 * Before proceeding, make sure that the PCIe block does not have
	 * transactions pending.
	 */
	poll = ixgbe_pcie_timeout_poll(hw);
	for (i = 0; i < poll; i++) {
		usec_delay(100);
		value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS);
		/* Bail immediately if the device dropped off the bus */
		if (IXGBE_REMOVED(hw->hw_addr))
			goto out;
		if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
			goto out;
	}

	/* Pending transactions never cleared; report failure to caller */
	ERROR_REPORT1(IXGBE_ERROR_POLLING,
		     "PCIe transaction pending bit also did not clear.\n");
	status = IXGBE_ERR_MASTER_REQUESTS_PENDING;

out:
	return status;
}
3276 
/**
 *  ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
 *  @hw: pointer to hardware structure
 *  @mask: Mask to specify which semaphore to acquire
 *
 *  Acquires the SWFW semaphore through the GSSR register for the specified
 *  function (CSR, PHY0, PHY1, EEPROM, Flash).  Retries for up to
 *  200 attempts (about 1 second at 5 ms per retry); after that, any stale
 *  SW/FW lock bits are forcibly released and IXGBE_ERR_SWFW_SYNC returned.
 **/
s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask)
{
	u32 gssr = 0;
	u32 swmask = mask;
	u32 fwmask = mask << 5;	/* FW-owned bits sit 5 positions above SW */
	u32 timeout = 200;
	u32 i;

	DEBUGFUNC("ixgbe_acquire_swfw_sync");

	for (i = 0; i < timeout; i++) {
		/*
		 * SW NVM semaphore bit is used for access to all
		 * SW_FW_SYNC bits (not just NVM)
		 */
		if (ixgbe_get_eeprom_semaphore(hw))
			return IXGBE_ERR_SWFW_SYNC;

		gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
		if (!(gssr & (fwmask | swmask))) {
			/* Free: claim the SW bit, then drop the EEPROM
			 * semaphore guarding the GSSR update. */
			gssr |= swmask;
			IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
			ixgbe_release_eeprom_semaphore(hw);
			return IXGBE_SUCCESS;
		} else {
			/* Resource is currently in use by FW or SW */
			ixgbe_release_eeprom_semaphore(hw);
			msec_delay(5);
		}
	}

	/* If time expired clear the bits holding the lock and retry */
	if (gssr & (fwmask | swmask))
		ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask));

	msec_delay(5);
	return IXGBE_ERR_SWFW_SYNC;
}
3323 
3324 /**
3325  *  ixgbe_release_swfw_sync - Release SWFW semaphore
3326  *  @hw: pointer to hardware structure
3327  *  @mask: Mask to specify which semaphore to release
3328  *
3329  *  Releases the SWFW semaphore through the GSSR register for the specified
3330  *  function (CSR, PHY0, PHY1, EEPROM, Flash)
3331  **/
3332 void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask)
3333 {
3334 	u32 gssr;
3335 	u32 swmask = mask;
3336 
3337 	DEBUGFUNC("ixgbe_release_swfw_sync");
3338 
3339 	ixgbe_get_eeprom_semaphore(hw);
3340 
3341 	gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
3342 	gssr &= ~swmask;
3343 	IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
3344 
3345 	ixgbe_release_eeprom_semaphore(hw);
3346 }
3347 
3348 /**
3349  *  ixgbe_disable_sec_rx_path_generic - Stops the receive data path
3350  *  @hw: pointer to hardware structure
3351  *
3352  *  Stops the receive data path and waits for the HW to internally empty
3353  *  the Rx security block
3354  **/
3355 s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw)
3356 {
3357 #define IXGBE_MAX_SECRX_POLL 40
3358 
3359 	int i;
3360 	int secrxreg;
3361 
3362 	DEBUGFUNC("ixgbe_disable_sec_rx_path_generic");
3363 
3364 
3365 	secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
3366 	secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
3367 	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
3368 	for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
3369 		secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
3370 		if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
3371 			break;
3372 		else
3373 			/* Use interrupt-safe sleep just in case */
3374 			usec_delay(1000);
3375 	}
3376 
3377 	/* For informational purposes only */
3378 	if (i >= IXGBE_MAX_SECRX_POLL)
3379 		DEBUGOUT("Rx unit being enabled before security "
3380 			 "path fully disabled.  Continuing with init.\n");
3381 
3382 	return IXGBE_SUCCESS;
3383 }
3384 
3385 /**
3386  *  prot_autoc_read_generic - Hides MAC differences needed for AUTOC read
3387  *  @hw: pointer to hardware structure
3388  *  @reg_val: Value we read from AUTOC
3389  *
3390  *  The default case requires no protection so just to the register read.
3391  */
3392 s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
3393 {
3394 	*locked = FALSE;
3395 	*reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
3396 	return IXGBE_SUCCESS;
3397 }
3398 
3399 /**
3400  * prot_autoc_write_generic - Hides MAC differences needed for AUTOC write
3401  * @hw: pointer to hardware structure
3402  * @reg_val: value to write to AUTOC
3403  * @locked: bool to indicate whether the SW/FW lock was already taken by
3404  *           previous read.
3405  *
3406  * The default case requires no protection so just to the register write.
3407  */
3408 s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked)
3409 {
3410 	UNREFERENCED_1PARAMETER(locked);
3411 
3412 	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val);
3413 	return IXGBE_SUCCESS;
3414 }
3415 
3416 /**
3417  *  ixgbe_enable_sec_rx_path_generic - Enables the receive data path
3418  *  @hw: pointer to hardware structure
3419  *
3420  *  Enables the receive data path.
3421  **/
3422 s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw)
3423 {
3424 	u32 secrxreg;
3425 
3426 	DEBUGFUNC("ixgbe_enable_sec_rx_path_generic");
3427 
3428 	secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
3429 	secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
3430 	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
3431 	IXGBE_WRITE_FLUSH(hw);
3432 
3433 	return IXGBE_SUCCESS;
3434 }
3435 
3436 /**
3437  *  ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit
3438  *  @hw: pointer to hardware structure
3439  *  @regval: register value to write to RXCTRL
3440  *
3441  *  Enables the Rx DMA unit
3442  **/
3443 s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
3444 {
3445 	DEBUGFUNC("ixgbe_enable_rx_dma_generic");
3446 
3447 	if (regval & IXGBE_RXCTRL_RXEN)
3448 		ixgbe_enable_rx(hw);
3449 	else
3450 		ixgbe_disable_rx(hw);
3451 
3452 	return IXGBE_SUCCESS;
3453 }
3454 
3455 /**
3456  *  ixgbe_blink_led_start_generic - Blink LED based on index.
3457  *  @hw: pointer to hardware structure
3458  *  @index: led number to blink
3459  **/
3460 s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
3461 {
3462 	ixgbe_link_speed speed = 0;
3463 	bool link_up = 0;
3464 	u32 autoc_reg = 0;
3465 	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
3466 	s32 ret_val = IXGBE_SUCCESS;
3467 	bool locked = FALSE;
3468 
3469 	DEBUGFUNC("ixgbe_blink_led_start_generic");
3470 
3471 	if (index > 3)
3472 		return IXGBE_ERR_PARAM;
3473 
3474 	/*
3475 	 * Link must be up to auto-blink the LEDs;
3476 	 * Force it if link is down.
3477 	 */
3478 	hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
3479 
3480 	if (!link_up) {
3481 		ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
3482 		if (ret_val != IXGBE_SUCCESS)
3483 			goto out;
3484 
3485 		autoc_reg |= IXGBE_AUTOC_AN_RESTART;
3486 		autoc_reg |= IXGBE_AUTOC_FLU;
3487 
3488 		ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
3489 		if (ret_val != IXGBE_SUCCESS)
3490 			goto out;
3491 
3492 		IXGBE_WRITE_FLUSH(hw);
3493 		msec_delay(10);
3494 	}
3495 
3496 	led_reg &= ~IXGBE_LED_MODE_MASK(index);
3497 	led_reg |= IXGBE_LED_BLINK(index);
3498 	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
3499 	IXGBE_WRITE_FLUSH(hw);
3500 
3501 out:
3502 	return ret_val;
3503 }
3504 
3505 /**
3506  *  ixgbe_blink_led_stop_generic - Stop blinking LED based on index.
3507  *  @hw: pointer to hardware structure
3508  *  @index: led number to stop blinking
3509  **/
3510 s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
3511 {
3512 	u32 autoc_reg = 0;
3513 	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
3514 	s32 ret_val = IXGBE_SUCCESS;
3515 	bool locked = FALSE;
3516 
3517 	DEBUGFUNC("ixgbe_blink_led_stop_generic");
3518 
3519 	if (index > 3)
3520 		return IXGBE_ERR_PARAM;
3521 
3522 	ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
3523 	if (ret_val != IXGBE_SUCCESS)
3524 		goto out;
3525 
3526 	autoc_reg &= ~IXGBE_AUTOC_FLU;
3527 	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
3528 
3529 	ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
3530 	if (ret_val != IXGBE_SUCCESS)
3531 		goto out;
3532 
3533 	led_reg &= ~IXGBE_LED_MODE_MASK(index);
3534 	led_reg &= ~IXGBE_LED_BLINK(index);
3535 	led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
3536 	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
3537 	IXGBE_WRITE_FLUSH(hw);
3538 
3539 out:
3540 	return ret_val;
3541 }
3542 
3543 /**
3544  *  ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM
3545  *  @hw: pointer to hardware structure
3546  *  @san_mac_offset: SAN MAC address offset
3547  *
3548  *  This function will read the EEPROM location for the SAN MAC address
3549  *  pointer, and returns the value at that location.  This is used in both
3550  *  get and set mac_addr routines.
3551  **/
3552 static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
3553 					 u16 *san_mac_offset)
3554 {
3555 	s32 ret_val;
3556 
3557 	DEBUGFUNC("ixgbe_get_san_mac_addr_offset");
3558 
3559 	/*
3560 	 * First read the EEPROM pointer to see if the MAC addresses are
3561 	 * available.
3562 	 */
3563 	ret_val = hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR,
3564 				      san_mac_offset);
3565 	if (ret_val) {
3566 		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
3567 			      "eeprom at offset %d failed",
3568 			      IXGBE_SAN_MAC_ADDR_PTR);
3569 	}
3570 
3571 	return ret_val;
3572 }
3573 
3574 /**
3575  *  ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM
3576  *  @hw: pointer to hardware structure
3577  *  @san_mac_addr: SAN MAC address
3578  *
3579  *  Reads the SAN MAC address from the EEPROM, if it's available.  This is
3580  *  per-port, so set_lan_id() must be called before reading the addresses.
3581  *  set_lan_id() is called by identify_sfp(), but this cannot be relied
3582  *  upon for non-SFP connections, so we must call it here.
3583  **/
3584 s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
3585 {
3586 	u16 san_mac_data, san_mac_offset;
3587 	u8 i;
3588 	s32 ret_val;
3589 
3590 	DEBUGFUNC("ixgbe_get_san_mac_addr_generic");
3591 
3592 	/*
3593 	 * First read the EEPROM pointer to see if the MAC addresses are
3594 	 * available.  If they're not, no point in calling set_lan_id() here.
3595 	 */
3596 	ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
3597 	if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
3598 		goto san_mac_addr_out;
3599 
3600 	/* make sure we know which port we need to program */
3601 	hw->mac.ops.set_lan_id(hw);
3602 	/* apply the port offset to the address offset */
3603 	(hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
3604 			 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
3605 	for (i = 0; i < 3; i++) {
3606 		ret_val = hw->eeprom.ops.read(hw, san_mac_offset,
3607 					      &san_mac_data);
3608 		if (ret_val) {
3609 			ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
3610 				      "eeprom read at offset %d failed",
3611 				      san_mac_offset);
3612 			goto san_mac_addr_out;
3613 		}
3614 		san_mac_addr[i * 2] = (u8)(san_mac_data);
3615 		san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
3616 		san_mac_offset++;
3617 	}
3618 	return IXGBE_SUCCESS;
3619 
3620 san_mac_addr_out:
3621 	/*
3622 	 * No addresses available in this EEPROM.  It's not an
3623 	 * error though, so just wipe the local address and return.
3624 	 */
3625 	for (i = 0; i < 6; i++)
3626 		san_mac_addr[i] = 0xFF;
3627 	return IXGBE_SUCCESS;
3628 }
3629 
3630 /**
3631  *  ixgbe_set_san_mac_addr_generic - Write the SAN MAC address to the EEPROM
3632  *  @hw: pointer to hardware structure
3633  *  @san_mac_addr: SAN MAC address
3634  *
3635  *  Write a SAN MAC address to the EEPROM.
3636  **/
3637 s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
3638 {
3639 	s32 ret_val;
3640 	u16 san_mac_data, san_mac_offset;
3641 	u8 i;
3642 
3643 	DEBUGFUNC("ixgbe_set_san_mac_addr_generic");
3644 
3645 	/* Look for SAN mac address pointer.  If not defined, return */
3646 	ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
3647 	if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
3648 		return IXGBE_ERR_NO_SAN_ADDR_PTR;
3649 
3650 	/* Make sure we know which port we need to write */
3651 	hw->mac.ops.set_lan_id(hw);
3652 	/* Apply the port offset to the address offset */
3653 	(hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
3654 			 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
3655 
3656 	for (i = 0; i < 3; i++) {
3657 		san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8);
3658 		san_mac_data |= (u16)(san_mac_addr[i * 2]);
3659 		hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data);
3660 		san_mac_offset++;
3661 	}
3662 
3663 	return IXGBE_SUCCESS;
3664 }
3665 
3666 /**
3667  *  ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count
3668  *  @hw: pointer to hardware structure
3669  *
3670  *  Read PCIe configuration space, and get the MSI-X vector count from
3671  *  the capabilities table.
3672  **/
3673 u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
3674 {
3675 	u16 msix_count = 1;
3676 	u16 max_msix_count;
3677 	u16 pcie_offset;
3678 
3679 	switch (hw->mac.type) {
3680 	case ixgbe_mac_82598EB:
3681 		pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS;
3682 		max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598;
3683 		break;
3684 	case ixgbe_mac_82599EB:
3685 	case ixgbe_mac_X540:
3686 	case ixgbe_mac_X550:
3687 	case ixgbe_mac_X550EM_x:
3688 	case ixgbe_mac_X550EM_a:
3689 		pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
3690 		max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
3691 		break;
3692 	default:
3693 		return msix_count;
3694 	}
3695 
3696 	DEBUGFUNC("ixgbe_get_pcie_msix_count_generic");
3697 	msix_count = IXGBE_READ_PCIE_WORD(hw, pcie_offset);
3698 	if (IXGBE_REMOVED(hw->hw_addr))
3699 		msix_count = 0;
3700 	msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
3701 
3702 	/* MSI-X count is zero-based in HW */
3703 	msix_count++;
3704 
3705 	if (msix_count > max_msix_count)
3706 		msix_count = max_msix_count;
3707 
3708 	return msix_count;
3709 }
3710 
/**
 *  ixgbe_insert_mac_addr_generic - Find a RAR for this mac address
 *  @hw: pointer to hardware structure
 *  @addr: Address to put into receive address register
 *  @vmdq: VMDq pool to assign
 *
 *  Puts an ethernet address into a receive address register, or
 *  finds the rar that it is already in; adds to the pool list.
 *  Returns the RAR index used on success, or IXGBE_ERR_INVALID_MAC_ADDR
 *  when every RAR entry is occupied.
 **/
s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
{
	static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF;
	u32 first_empty_rar = NO_EMPTY_RAR_FOUND;
	u32 rar;
	u32 rar_low, rar_high;
	u32 addr_low, addr_high;

	DEBUGFUNC("ixgbe_insert_mac_addr_generic");

	/* swap bytes for HW little endian */
	addr_low  = addr[0] | (addr[1] << 8)
			    | (addr[2] << 16)
			    | (addr[3] << 24);
	addr_high = addr[4] | (addr[5] << 8);

	/*
	 * Either find the mac_id in rar or find the first empty space.
	 * rar_highwater points to just after the highest currently used
	 * rar in order to shorten the search.  It grows when we add a new
	 * rar to the top.
	 */
	for (rar = 0; rar < hw->mac.rar_highwater; rar++) {
		/* RAH holds the top 16 address bits plus the valid (AV) bit */
		rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));

		if (((IXGBE_RAH_AV & rar_high) == 0)
		    && first_empty_rar == NO_EMPTY_RAR_FOUND) {
			first_empty_rar = rar;
		} else if ((rar_high & 0xFFFF) == addr_high) {
			rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar));
			if (rar_low == addr_low)
				break;    /* found it already in the rars */
		}
	}

	/*
	 * rar's value after the loop distinguishes the cases below:
	 * below highwater = matched existing entry (early break);
	 * equal to highwater = no match and no hole, grow upward.
	 */
	if (rar < hw->mac.rar_highwater) {
		/* already there so just add to the pool bits */
		ixgbe_set_vmdq(hw, rar, vmdq);
	} else if (first_empty_rar != NO_EMPTY_RAR_FOUND) {
		/* stick it into first empty RAR slot we found */
		rar = first_empty_rar;
		ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
	} else if (rar == hw->mac.rar_highwater) {
		/* add it to the top of the list and inc the highwater mark */
		ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
		hw->mac.rar_highwater++;
	} else if (rar >= hw->mac.num_rar_entries) {
		return IXGBE_ERR_INVALID_MAC_ADDR;
	}

	/*
	 * If we found rar[0], make sure the default pool bit (we use pool 0)
	 * remains cleared to be sure default pool packets will get delivered
	 */
	if (rar == 0)
		ixgbe_clear_vmdq(hw, rar, 0);

	return rar;
}
3779 
3780 /**
3781  *  ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
3782  *  @hw: pointer to hardware struct
3783  *  @rar: receive address register index to disassociate
3784  *  @vmdq: VMDq pool index to remove from the rar
3785  **/
3786 s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
3787 {
3788 	u32 mpsar_lo, mpsar_hi;
3789 	u32 rar_entries = hw->mac.num_rar_entries;
3790 
3791 	DEBUGFUNC("ixgbe_clear_vmdq_generic");
3792 
3793 	/* Make sure we are using a valid rar index range */
3794 	if (rar >= rar_entries) {
3795 		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
3796 			     "RAR index %d is out of range.\n", rar);
3797 		return IXGBE_ERR_INVALID_ARGUMENT;
3798 	}
3799 
3800 	mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3801 	mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3802 
3803 	if (IXGBE_REMOVED(hw->hw_addr))
3804 		goto done;
3805 
3806 	if (!mpsar_lo && !mpsar_hi)
3807 		goto done;
3808 
3809 	if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
3810 		if (mpsar_lo) {
3811 			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
3812 			mpsar_lo = 0;
3813 		}
3814 		if (mpsar_hi) {
3815 			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
3816 			mpsar_hi = 0;
3817 		}
3818 	} else if (vmdq < 32) {
3819 		mpsar_lo &= ~(1 << vmdq);
3820 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
3821 	} else {
3822 		mpsar_hi &= ~(1 << (vmdq - 32));
3823 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
3824 	}
3825 
3826 	/* was that the last pool using this rar? */
3827 	if (mpsar_lo == 0 && mpsar_hi == 0 &&
3828 	    rar != 0 && rar != hw->mac.san_mac_rar_index)
3829 		hw->mac.ops.clear_rar(hw, rar);
3830 done:
3831 	return IXGBE_SUCCESS;
3832 }
3833 
3834 /**
3835  *  ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address
3836  *  @hw: pointer to hardware struct
3837  *  @rar: receive address register index to associate with a VMDq index
3838  *  @vmdq: VMDq pool index
3839  **/
3840 s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
3841 {
3842 	u32 mpsar;
3843 	u32 rar_entries = hw->mac.num_rar_entries;
3844 
3845 	DEBUGFUNC("ixgbe_set_vmdq_generic");
3846 
3847 	/* Make sure we are using a valid rar index range */
3848 	if (rar >= rar_entries) {
3849 		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
3850 			     "RAR index %d is out of range.\n", rar);
3851 		return IXGBE_ERR_INVALID_ARGUMENT;
3852 	}
3853 
3854 	if (vmdq < 32) {
3855 		mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3856 		mpsar |= 1 << vmdq;
3857 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
3858 	} else {
3859 		mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3860 		mpsar |= 1 << (vmdq - 32);
3861 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
3862 	}
3863 	return IXGBE_SUCCESS;
3864 }
3865 
3866 /**
3867  *  This function should only be involved in the IOV mode.
3868  *  In IOV mode, Default pool is next pool after the number of
3869  *  VFs advertized and not 0.
3870  *  MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index]
3871  *
3872  *  ixgbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address
3873  *  @hw: pointer to hardware struct
3874  *  @vmdq: VMDq pool index
3875  **/
3876 s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
3877 {
3878 	u32 rar = hw->mac.san_mac_rar_index;
3879 
3880 	DEBUGFUNC("ixgbe_set_vmdq_san_mac");
3881 
3882 	if (vmdq < 32) {
3883 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq);
3884 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
3885 	} else {
3886 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
3887 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32));
3888 	}
3889 
3890 	return IXGBE_SUCCESS;
3891 }
3892 
3893 /**
3894  *  ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
3895  *  @hw: pointer to hardware structure
3896  **/
3897 s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
3898 {
3899 	int i;
3900 
3901 	DEBUGFUNC("ixgbe_init_uta_tables_generic");
3902 	DEBUGOUT(" Clearing UTA\n");
3903 
3904 	for (i = 0; i < 128; i++)
3905 		IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
3906 
3907 	return IXGBE_SUCCESS;
3908 }
3909 
/**
 *  ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
 *  @hw: pointer to hardware structure
 *  @vlan: VLAN id to write to VLAN filter
 *  @vlvf_bypass: when true, do not claim an empty slot; only report an
 *		  existing entry for this VLAN (else IXGBE_ERR_NO_SPACE)
 *
 *  return the VLVF index where this VLAN id should be placed
 *
 **/
s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass)
{
	s32 regindex, first_empty_slot;
	u32 bits;

	/* short cut the special case */
	if (vlan == 0)
		return 0;

	/* if vlvf_bypass is set we don't want to use an empty slot, we
	 * will simply bypass the VLVF if there are no entries present in the
	 * VLVF that contain our VLAN
	 */
	/* first_empty_slot doubles as the return value: 0 means "no empty
	 * slot recorded yet" (slot 0 is never scanned below), and when
	 * bypassing it is pre-set to the negative IXGBE_ERR_NO_SPACE so it
	 * is never overwritten by the !first_empty_slot check. */
	first_empty_slot = vlvf_bypass ? IXGBE_ERR_NO_SPACE : 0;

	/* add VLAN enable bit for comparison */
	vlan |= IXGBE_VLVF_VIEN;

	/* Search for the vlan id in the VLVF entries. Save off the first empty
	 * slot found along the way.
	 *
	 * pre-decrement loop covering (IXGBE_VLVF_ENTRIES - 1) .. 1
	 */
	for (regindex = IXGBE_VLVF_ENTRIES; --regindex;) {
		bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
		if (bits == vlan)
			return regindex;
		if (!first_empty_slot && !bits)
			first_empty_slot = regindex;
	}

	/* If we are here then we didn't find the VLAN.  Return first empty
	 * slot we found during our search, else error.
	 */
	if (!first_empty_slot)
		ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "No space in VLVF.\n");

	return first_empty_slot ? first_empty_slot : IXGBE_ERR_NO_SPACE;
}
3957 
3958 /**
3959  *  ixgbe_set_vfta_generic - Set VLAN filter table
3960  *  @hw: pointer to hardware structure
3961  *  @vlan: VLAN id to write to VLAN filter
3962  *  @vind: VMDq output index that maps queue to VLAN id in VLVFB
3963  *  @vlan_on: boolean flag to turn on/off VLAN
3964  *  @vlvf_bypass: boolean flag indicating updating default pool is okay
3965  *
3966  *  Turn on/off specified VLAN in the VLAN filter table.
3967  **/
3968 s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
3969 			   bool vlan_on, bool vlvf_bypass)
3970 {
3971 	u32 regidx, vfta_delta, vfta;
3972 	s32 ret_val;
3973 
3974 	DEBUGFUNC("ixgbe_set_vfta_generic");
3975 
3976 	if (vlan > 4095 || vind > 63)
3977 		return IXGBE_ERR_PARAM;
3978 
3979 	/*
3980 	 * this is a 2 part operation - first the VFTA, then the
3981 	 * VLVF and VLVFB if VT Mode is set
3982 	 * We don't write the VFTA until we know the VLVF part succeeded.
3983 	 */
3984 
3985 	/* Part 1
3986 	 * The VFTA is a bitstring made up of 128 32-bit registers
3987 	 * that enable the particular VLAN id, much like the MTA:
3988 	 *    bits[11-5]: which register
3989 	 *    bits[4-0]:  which bit in the register
3990 	 */
3991 	regidx = vlan / 32;
3992 	vfta_delta = 1 << (vlan % 32);
3993 	vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regidx));
3994 
3995 	/*
3996 	 * vfta_delta represents the difference between the current value
3997 	 * of vfta and the value we want in the register.  Since the diff
3998 	 * is an XOR mask we can just update the vfta using an XOR
3999 	 */
4000 	vfta_delta &= vlan_on ? ~vfta : vfta;
4001 	vfta ^= vfta_delta;
4002 
4003 	/* Part 2
4004 	 * Call ixgbe_set_vlvf_generic to set VLVFB and VLVF
4005 	 */
4006 	ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on, &vfta_delta,
4007 					 vfta, vlvf_bypass);
4008 	if (ret_val != IXGBE_SUCCESS) {
4009 		if (vlvf_bypass)
4010 			goto vfta_update;
4011 		return ret_val;
4012 	}
4013 
4014 vfta_update:
4015 	/* Update VFTA now that we are ready for traffic */
4016 	if (vfta_delta)
4017 		IXGBE_WRITE_REG(hw, IXGBE_VFTA(regidx), vfta);
4018 
4019 	return IXGBE_SUCCESS;
4020 }
4021 
4022 /**
4023  *  ixgbe_set_vlvf_generic - Set VLAN Pool Filter
4024  *  @hw: pointer to hardware structure
4025  *  @vlan: VLAN id to write to VLAN filter
4026  *  @vind: VMDq output index that maps queue to VLAN id in VLVFB
4027  *  @vlan_on: boolean flag to turn on/off VLAN in VLVF
4028  *  @vfta_delta: pointer to the difference between the current value of VFTA
4029  *		 and the desired value
4030  *  @vfta: the desired value of the VFTA
4031  *  @vlvf_bypass: boolean flag indicating updating default pool is okay
4032  *
4033  *  Turn on/off specified bit in VLVF table.
4034  **/
4035 s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
4036 			   bool vlan_on, u32 *vfta_delta, u32 vfta,
4037 			   bool vlvf_bypass)
4038 {
4039 	u32 bits;
4040 	s32 vlvf_index;
4041 
4042 	DEBUGFUNC("ixgbe_set_vlvf_generic");
4043 
4044 	if (vlan > 4095 || vind > 63)
4045 		return IXGBE_ERR_PARAM;
4046 
4047 	/* If VT Mode is set
4048 	 *   Either vlan_on
4049 	 *     make sure the vlan is in VLVF
4050 	 *     set the vind bit in the matching VLVFB
4051 	 *   Or !vlan_on
4052 	 *     clear the pool bit and possibly the vind
4053 	 */
4054 	if (!(IXGBE_READ_REG(hw, IXGBE_VT_CTL) & IXGBE_VT_CTL_VT_ENABLE))
4055 		return IXGBE_SUCCESS;
4056 
4057 	vlvf_index = ixgbe_find_vlvf_slot(hw, vlan, vlvf_bypass);
4058 	if (vlvf_index < 0)
4059 		return vlvf_index;
4060 
4061 	bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32));
4062 
4063 	/* set the pool bit */
4064 	bits |= 1 << (vind % 32);
4065 	if (vlan_on)
4066 		goto vlvf_update;
4067 
4068 	/* clear the pool bit */
4069 	bits ^= 1 << (vind % 32);
4070 
4071 	if (!bits &&
4072 	    !IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + 1 - vind / 32))) {
4073 		/* Clear VFTA first, then disable VLVF.  Otherwise
4074 		 * we run the risk of stray packets leaking into
4075 		 * the PF via the default pool
4076 		 */
4077 		if (*vfta_delta)
4078 			IXGBE_WRITE_REG(hw, IXGBE_VFTA(vlan / 32), vfta);
4079 
4080 		/* disable VLVF and clear remaining bit from pool */
4081 		IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
4082 		IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), 0);
4083 
4084 		return IXGBE_SUCCESS;
4085 	}
4086 
4087 	/* If there are still bits set in the VLVFB registers
4088 	 * for the VLAN ID indicated we need to see if the
4089 	 * caller is requesting that we clear the VFTA entry bit.
4090 	 * If the caller has requested that we clear the VFTA
4091 	 * entry bit but there are still pools/VFs using this VLAN
4092 	 * ID entry then ignore the request.  We're not worried
4093 	 * about the case where we're turning the VFTA VLAN ID
4094 	 * entry bit on, only when requested to turn it off as
4095 	 * there may be multiple pools and/or VFs using the
4096 	 * VLAN ID entry.  In that case we cannot clear the
4097 	 * VFTA bit until all pools/VFs using that VLAN ID have also
4098 	 * been cleared.  This will be indicated by "bits" being
4099 	 * zero.
4100 	 */
4101 	*vfta_delta = 0;
4102 
4103 vlvf_update:
4104 	/* record pool change and enable VLAN ID if not already enabled */
4105 	IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), bits);
4106 	IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), IXGBE_VLVF_VIEN | vlan);
4107 
4108 	return IXGBE_SUCCESS;
4109 }
4110 
4111 /**
4112  *  ixgbe_clear_vfta_generic - Clear VLAN filter table
4113  *  @hw: pointer to hardware structure
4114  *
4115  *  Clears the VLAN filer table, and the VMDq index associated with the filter
4116  **/
4117 s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
4118 {
4119 	u32 offset;
4120 
4121 	DEBUGFUNC("ixgbe_clear_vfta_generic");
4122 
4123 	for (offset = 0; offset < hw->mac.vft_size; offset++)
4124 		IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
4125 
4126 	for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
4127 		IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
4128 		IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0);
4129 		IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset * 2) + 1), 0);
4130 	}
4131 
4132 	return IXGBE_SUCCESS;
4133 }
4134 
4135 /**
4136  *  ixgbe_need_crosstalk_fix - Determine if we need to do cross talk fix
4137  *  @hw: pointer to hardware structure
4138  *
4139  *  Contains the logic to identify if we need to verify link for the
4140  *  crosstalk fix
4141  **/
4142 static bool ixgbe_need_crosstalk_fix(struct ixgbe_hw *hw)
4143 {
4144 
4145 	/* Does FW say we need the fix */
4146 	if (!hw->need_crosstalk_fix)
4147 		return FALSE;
4148 
4149 	/* Only consider SFP+ PHYs i.e. media type fiber */
4150 	switch (hw->mac.ops.get_media_type(hw)) {
4151 	case ixgbe_media_type_fiber:
4152 	case ixgbe_media_type_fiber_qsfp:
4153 		break;
4154 	default:
4155 		return FALSE;
4156 	}
4157 
4158 	return TRUE;
4159 }
4160 
4161 /**
4162  *  ixgbe_check_mac_link_generic - Determine link and speed status
4163  *  @hw: pointer to hardware structure
4164  *  @speed: pointer to link speed
4165  *  @link_up: TRUE when link is up
4166  *  @link_up_wait_to_complete: bool used to wait for link up or not
4167  *
4168  *  Reads the links register to determine if link is up and the current speed
4169  **/
4170 s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
4171 				 bool *link_up, bool link_up_wait_to_complete)
4172 {
4173 	u32 links_reg, links_orig;
4174 	u32 i;
4175 
4176 	DEBUGFUNC("ixgbe_check_mac_link_generic");
4177 
4178 	/* If Crosstalk fix enabled do the sanity check of making sure
4179 	 * the SFP+ cage is full.
4180 	 */
4181 	if (ixgbe_need_crosstalk_fix(hw)) {
4182 		u32 sfp_cage_full;
4183 
4184 		switch (hw->mac.type) {
4185 		case ixgbe_mac_82599EB:
4186 			sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
4187 					IXGBE_ESDP_SDP2;
4188 			break;
4189 		case ixgbe_mac_X550EM_x:
4190 		case ixgbe_mac_X550EM_a:
4191 			sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
4192 					IXGBE_ESDP_SDP0;
4193 			break;
4194 		default:
4195 			/* sanity check - No SFP+ devices here */
4196 			sfp_cage_full = FALSE;
4197 			break;
4198 		}
4199 
4200 		if (!sfp_cage_full) {
4201 			*link_up = FALSE;
4202 			*speed = IXGBE_LINK_SPEED_UNKNOWN;
4203 			return IXGBE_SUCCESS;
4204 		}
4205 	}
4206 
4207 	/* clear the old state */
4208 	links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);
4209 
4210 	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
4211 
4212 	if (links_orig != links_reg) {
4213 		DEBUGOUT2("LINKS changed from %08X to %08X\n",
4214 			  links_orig, links_reg);
4215 	}
4216 
4217 	if (link_up_wait_to_complete) {
4218 		for (i = 0; i < hw->mac.max_link_up_time; i++) {
4219 			if (links_reg & IXGBE_LINKS_UP) {
4220 				*link_up = TRUE;
4221 				break;
4222 			} else {
4223 				*link_up = FALSE;
4224 			}
4225 			msec_delay(100);
4226 			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
4227 		}
4228 	} else {
4229 		if (links_reg & IXGBE_LINKS_UP)
4230 			*link_up = TRUE;
4231 		else
4232 			*link_up = FALSE;
4233 	}
4234 
4235 	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
4236 	case IXGBE_LINKS_SPEED_10G_82599:
4237 		*speed = IXGBE_LINK_SPEED_10GB_FULL;
4238 		if (hw->mac.type >= ixgbe_mac_X550) {
4239 			if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
4240 				*speed = IXGBE_LINK_SPEED_2_5GB_FULL;
4241 		}
4242 		break;
4243 	case IXGBE_LINKS_SPEED_1G_82599:
4244 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
4245 		break;
4246 	case IXGBE_LINKS_SPEED_100_82599:
4247 		*speed = IXGBE_LINK_SPEED_100_FULL;
4248 		if (hw->mac.type == ixgbe_mac_X550) {
4249 			if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
4250 				*speed = IXGBE_LINK_SPEED_5GB_FULL;
4251 		}
4252 		break;
4253 	case IXGBE_LINKS_SPEED_10_X550EM_A:
4254 		*speed = IXGBE_LINK_SPEED_UNKNOWN;
4255 		if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
4256 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) {
4257 			*speed = IXGBE_LINK_SPEED_10_FULL;
4258 		}
4259 		break;
4260 	default:
4261 		*speed = IXGBE_LINK_SPEED_UNKNOWN;
4262 	}
4263 
4264 	return IXGBE_SUCCESS;
4265 }
4266 
4267 /**
4268  *  ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
4269  *  the EEPROM
4270  *  @hw: pointer to hardware structure
4271  *  @wwnn_prefix: the alternative WWNN prefix
4272  *  @wwpn_prefix: the alternative WWPN prefix
4273  *
4274  *  This function will read the EEPROM from the alternative SAN MAC address
4275  *  block to check the support for the alternative WWNN/WWPN prefix support.
4276  **/
4277 s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
4278 				 u16 *wwpn_prefix)
4279 {
4280 	u16 offset, caps;
4281 	u16 alt_san_mac_blk_offset;
4282 
4283 	DEBUGFUNC("ixgbe_get_wwn_prefix_generic");
4284 
4285 	/* clear output first */
4286 	*wwnn_prefix = 0xFFFF;
4287 	*wwpn_prefix = 0xFFFF;
4288 
4289 	/* check if alternative SAN MAC is supported */
4290 	offset = IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR;
4291 	if (hw->eeprom.ops.read(hw, offset, &alt_san_mac_blk_offset))
4292 		goto wwn_prefix_err;
4293 
4294 	if ((alt_san_mac_blk_offset == 0) ||
4295 	    (alt_san_mac_blk_offset == 0xFFFF))
4296 		goto wwn_prefix_out;
4297 
4298 	/* check capability in alternative san mac address block */
4299 	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
4300 	if (hw->eeprom.ops.read(hw, offset, &caps))
4301 		goto wwn_prefix_err;
4302 	if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
4303 		goto wwn_prefix_out;
4304 
4305 	/* get the corresponding prefix for WWNN/WWPN */
4306 	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
4307 	if (hw->eeprom.ops.read(hw, offset, wwnn_prefix)) {
4308 		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
4309 			      "eeprom read at offset %d failed", offset);
4310 	}
4311 
4312 	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
4313 	if (hw->eeprom.ops.read(hw, offset, wwpn_prefix))
4314 		goto wwn_prefix_err;
4315 
4316 wwn_prefix_out:
4317 	return IXGBE_SUCCESS;
4318 
4319 wwn_prefix_err:
4320 	ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
4321 		      "eeprom read at offset %d failed", offset);
4322 	return IXGBE_SUCCESS;
4323 }
4324 
4325 /**
4326  *  ixgbe_get_fcoe_boot_status_generic - Get FCOE boot status from EEPROM
4327  *  @hw: pointer to hardware structure
4328  *  @bs: the fcoe boot status
4329  *
4330  *  This function will read the FCOE boot status from the iSCSI FCOE block
4331  **/
4332 s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs)
4333 {
4334 	u16 offset, caps, flags;
4335 	s32 status;
4336 
4337 	DEBUGFUNC("ixgbe_get_fcoe_boot_status_generic");
4338 
4339 	/* clear output first */
4340 	*bs = ixgbe_fcoe_bootstatus_unavailable;
4341 
4342 	/* check if FCOE IBA block is present */
4343 	offset = IXGBE_FCOE_IBA_CAPS_BLK_PTR;
4344 	status = hw->eeprom.ops.read(hw, offset, &caps);
4345 	if (status != IXGBE_SUCCESS)
4346 		goto out;
4347 
4348 	if (!(caps & IXGBE_FCOE_IBA_CAPS_FCOE))
4349 		goto out;
4350 
4351 	/* check if iSCSI FCOE block is populated */
4352 	status = hw->eeprom.ops.read(hw, IXGBE_ISCSI_FCOE_BLK_PTR, &offset);
4353 	if (status != IXGBE_SUCCESS)
4354 		goto out;
4355 
4356 	if ((offset == 0) || (offset == 0xFFFF))
4357 		goto out;
4358 
4359 	/* read fcoe flags in iSCSI FCOE block */
4360 	offset = offset + IXGBE_ISCSI_FCOE_FLAGS_OFFSET;
4361 	status = hw->eeprom.ops.read(hw, offset, &flags);
4362 	if (status != IXGBE_SUCCESS)
4363 		goto out;
4364 
4365 	if (flags & IXGBE_ISCSI_FCOE_FLAGS_ENABLE)
4366 		*bs = ixgbe_fcoe_bootstatus_enabled;
4367 	else
4368 		*bs = ixgbe_fcoe_bootstatus_disabled;
4369 
4370 out:
4371 	return status;
4372 }
4373 
4374 /**
4375  *  ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
4376  *  @hw: pointer to hardware structure
4377  *  @enable: enable or disable switch for MAC anti-spoofing
4378  *  @vf: Virtual Function pool - VF Pool to set for MAC anti-spoofing
4379  *
4380  **/
4381 void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
4382 {
4383 	int vf_target_reg = vf >> 3;
4384 	int vf_target_shift = vf % 8;
4385 	u32 pfvfspoof;
4386 
4387 	if (hw->mac.type == ixgbe_mac_82598EB)
4388 		return;
4389 
4390 	pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
4391 	if (enable)
4392 		pfvfspoof |= (1 << vf_target_shift);
4393 	else
4394 		pfvfspoof &= ~(1 << vf_target_shift);
4395 	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
4396 }
4397 
4398 /**
4399  *  ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
4400  *  @hw: pointer to hardware structure
4401  *  @enable: enable or disable switch for VLAN anti-spoofing
4402  *  @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
4403  *
4404  **/
4405 void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
4406 {
4407 	int vf_target_reg = vf >> 3;
4408 	int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT;
4409 	u32 pfvfspoof;
4410 
4411 	if (hw->mac.type == ixgbe_mac_82598EB)
4412 		return;
4413 
4414 	pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
4415 	if (enable)
4416 		pfvfspoof |= (1 << vf_target_shift);
4417 	else
4418 		pfvfspoof &= ~(1 << vf_target_shift);
4419 	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
4420 }
4421 
4422 /**
4423  *  ixgbe_get_device_caps_generic - Get additional device capabilities
4424  *  @hw: pointer to hardware structure
4425  *  @device_caps: the EEPROM word with the extra device capabilities
4426  *
4427  *  This function will read the EEPROM location for the device capabilities,
4428  *  and return the word through device_caps.
4429  **/
4430 s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps)
4431 {
4432 	DEBUGFUNC("ixgbe_get_device_caps_generic");
4433 
4434 	hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
4435 
4436 	return IXGBE_SUCCESS;
4437 }
4438 
4439 /**
4440  *  ixgbe_enable_relaxed_ordering_gen2 - Enable relaxed ordering
4441  *  @hw: pointer to hardware structure
4442  *
4443  **/
4444 void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw)
4445 {
4446 	u32 regval;
4447 	u32 i;
4448 
4449 	DEBUGFUNC("ixgbe_enable_relaxed_ordering_gen2");
4450 
4451 	/* Enable relaxed ordering */
4452 	for (i = 0; i < hw->mac.max_tx_queues; i++) {
4453 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
4454 		regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
4455 		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
4456 	}
4457 
4458 	for (i = 0; i < hw->mac.max_rx_queues; i++) {
4459 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
4460 		regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
4461 			  IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
4462 		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
4463 	}
4464 
4465 }
4466 
4467 /**
4468  *  ixgbe_calculate_checksum - Calculate checksum for buffer
4469  *  @buffer: pointer to EEPROM
4470  *  @length: size of EEPROM to calculate a checksum for
4471  *  Calculates the checksum for some buffer on a specified length.  The
4472  *  checksum calculated is returned.
4473  **/
4474 u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
4475 {
4476 	u32 i;
4477 	u8 sum = 0;
4478 
4479 	DEBUGFUNC("ixgbe_calculate_checksum");
4480 
4481 	if (!buffer)
4482 		return 0;
4483 
4484 	for (i = 0; i < length; i++)
4485 		sum += buffer[i];
4486 
4487 	return (u8) (0 - sum);
4488 }
4489 
4490 /**
4491  *  ixgbe_hic_unlocked - Issue command to manageability block unlocked
4492  *  @hw: pointer to the HW structure
4493  *  @buffer: command to write and where the return status will be placed
4494  *  @length: length of buffer, must be multiple of 4 bytes
4495  *  @timeout: time in ms to wait for command completion
4496  *
4497  *  Communicates with the manageability block. On success return IXGBE_SUCCESS
4498  *  else returns semaphore error when encountering an error acquiring
4499  *  semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
4500  *
4501  *  This function assumes that the IXGBE_GSSR_SW_MNG_SM semaphore is held
4502  *  by the caller.
4503  **/
4504 s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length,
4505 		       u32 timeout)
4506 {
4507 	u32 hicr, i, fwsts;
4508 	u16 dword_len;
4509 
4510 	DEBUGFUNC("ixgbe_hic_unlocked");
4511 
4512 	if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
4513 		DEBUGOUT1("Buffer length failure buffersize=%d.\n", length);
4514 		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
4515 	}
4516 
4517 	/* Set bit 9 of FWSTS clearing FW reset indication */
4518 	fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS);
4519 	IXGBE_WRITE_REG(hw, IXGBE_FWSTS, fwsts | IXGBE_FWSTS_FWRI);
4520 
4521 	/* Check that the host interface is enabled. */
4522 	hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
4523 	if (!(hicr & IXGBE_HICR_EN)) {
4524 		DEBUGOUT("IXGBE_HOST_EN bit disabled.\n");
4525 		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
4526 	}
4527 
4528 	/* Calculate length in DWORDs. We must be DWORD aligned */
4529 	if (length % sizeof(u32)) {
4530 		DEBUGOUT("Buffer length failure, not aligned to dword");
4531 		return IXGBE_ERR_INVALID_ARGUMENT;
4532 	}
4533 
4534 	dword_len = length >> 2;
4535 
4536 	/* The device driver writes the relevant command block
4537 	 * into the ram area.
4538 	 */
4539 	for (i = 0; i < dword_len; i++)
4540 		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
4541 				      i, IXGBE_CPU_TO_LE32(buffer[i]));
4542 
4543 	/* Setting this bit tells the ARC that a new command is pending. */
4544 	IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);
4545 
4546 	for (i = 0; i < timeout; i++) {
4547 		hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
4548 		if (!(hicr & IXGBE_HICR_C))
4549 			break;
4550 		msec_delay(1);
4551 	}
4552 
4553 	/* Check command completion */
4554 	if ((timeout && i == timeout) ||
4555 	    !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) {
4556 		ERROR_REPORT1(IXGBE_ERROR_CAUTION,
4557 			     "Command has failed with no status valid.\n");
4558 		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
4559 	}
4560 
4561 	return IXGBE_SUCCESS;
4562 }
4563 
4564 /**
4565  *  ixgbe_host_interface_command - Issue command to manageability block
4566  *  @hw: pointer to the HW structure
4567  *  @buffer: contains the command to write and where the return status will
4568  *   be placed
4569  *  @length: length of buffer, must be multiple of 4 bytes
4570  *  @timeout: time in ms to wait for command completion
4571  *  @return_data: read and return data from the buffer (TRUE) or not (FALSE)
4572  *   Needed because FW structures are big endian and decoding of
4573  *   these fields can be 8 bit or 16 bit based on command. Decoding
4574  *   is not easily understood without making a table of commands.
4575  *   So we will leave this up to the caller to read back the data
4576  *   in these cases.
4577  *
4578  *  Communicates with the manageability block. On success return IXGBE_SUCCESS
4579  *  else returns semaphore error when encountering an error acquiring
4580  *  semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
4581  **/
4582 s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
4583 				 u32 length, u32 timeout, bool return_data)
4584 {
4585 	u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
4586 	u16 dword_len;
4587 	u16 buf_len;
4588 	s32 status;
4589 	u32 bi;
4590 
4591 	DEBUGFUNC("ixgbe_host_interface_command");
4592 
4593 	if (length == 0 || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
4594 		DEBUGOUT1("Buffer length failure buffersize=%d.\n", length);
4595 		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
4596 	}
4597 
4598 	/* Take management host interface semaphore */
4599 	status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
4600 	if (status)
4601 		return status;
4602 
4603 	status = ixgbe_hic_unlocked(hw, buffer, length, timeout);
4604 	if (status)
4605 		goto rel_out;
4606 
4607 	if (!return_data)
4608 		goto rel_out;
4609 
4610 	/* Calculate length in DWORDs */
4611 	dword_len = hdr_size >> 2;
4612 
4613 	/* first pull in the header so we know the buffer length */
4614 	for (bi = 0; bi < dword_len; bi++) {
4615 		buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
4616 		IXGBE_LE32_TO_CPUS(&buffer[bi]);
4617 	}
4618 
4619 	/* If there is any thing in data position pull it in */
4620 	buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len;
4621 	if (!buf_len)
4622 		goto rel_out;
4623 
4624 	if (length < buf_len + hdr_size) {
4625 		DEBUGOUT("Buffer not large enough for reply message.\n");
4626 		status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4627 		goto rel_out;
4628 	}
4629 
4630 	/* Calculate length in DWORDs, add 3 for odd lengths */
4631 	dword_len = (buf_len + 3) >> 2;
4632 
4633 	/* Pull in the rest of the buffer (bi is where we left off) */
4634 	for (; bi <= dword_len; bi++) {
4635 		buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
4636 		IXGBE_LE32_TO_CPUS(&buffer[bi]);
4637 	}
4638 
4639 rel_out:
4640 	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
4641 
4642 	return status;
4643 }
4644 
4645 /**
4646  *  ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware
4647  *  @hw: pointer to the HW structure
4648  *  @maj: driver version major number
4649  *  @min: driver version minor number
4650  *  @build: driver version build number
4651  *  @sub: driver version sub build number
4652  *
4653  *  Sends driver version number to firmware through the manageability
4654  *  block.  On success return IXGBE_SUCCESS
4655  *  else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring
4656  *  semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
4657  **/
4658 s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
4659 				 u8 build, u8 sub, u16 len,
4660 				 const char *driver_ver)
4661 {
4662 	struct ixgbe_hic_drv_info fw_cmd;
4663 	int i;
4664 	s32 ret_val = IXGBE_SUCCESS;
4665 
4666 	DEBUGFUNC("ixgbe_set_fw_drv_ver_generic");
4667 	UNREFERENCED_2PARAMETER(len, driver_ver);
4668 
4669 	fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
4670 	fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
4671 	fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
4672 	fw_cmd.port_num = (u8)hw->bus.func;
4673 	fw_cmd.ver_maj = maj;
4674 	fw_cmd.ver_min = min;
4675 	fw_cmd.ver_build = build;
4676 	fw_cmd.ver_sub = sub;
4677 	fw_cmd.hdr.checksum = 0;
4678 	fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
4679 				(FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
4680 	fw_cmd.pad = 0;
4681 	fw_cmd.pad2 = 0;
4682 
4683 	for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
4684 		ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
4685 						       sizeof(fw_cmd),
4686 						       IXGBE_HI_COMMAND_TIMEOUT,
4687 						       TRUE);
4688 		if (ret_val != IXGBE_SUCCESS)
4689 			continue;
4690 
4691 		if (fw_cmd.hdr.cmd_or_resp.ret_status ==
4692 		    FW_CEM_RESP_STATUS_SUCCESS)
4693 			ret_val = IXGBE_SUCCESS;
4694 		else
4695 			ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4696 
4697 		break;
4698 	}
4699 
4700 	return ret_val;
4701 }
4702 
4703 /**
4704  * ixgbe_set_rxpba_generic - Initialize Rx packet buffer
4705  * @hw: pointer to hardware structure
4706  * @num_pb: number of packet buffers to allocate
4707  * @headroom: reserve n KB of headroom
4708  * @strategy: packet buffer allocation strategy
4709  **/
4710 void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
4711 			     int strategy)
4712 {
4713 	u32 pbsize = hw->mac.rx_pb_size;
4714 	int i = 0;
4715 	u32 rxpktsize, txpktsize, txpbthresh;
4716 
4717 	/* Reserve headroom */
4718 	pbsize -= headroom;
4719 
4720 	if (!num_pb)
4721 		num_pb = 1;
4722 
4723 	/* Divide remaining packet buffer space amongst the number of packet
4724 	 * buffers requested using supplied strategy.
4725 	 */
4726 	switch (strategy) {
4727 	case PBA_STRATEGY_WEIGHTED:
4728 		/* ixgbe_dcb_pba_80_48 strategy weight first half of packet
4729 		 * buffer with 5/8 of the packet buffer space.
4730 		 */
4731 		rxpktsize = (pbsize * 5) / (num_pb * 4);
4732 		pbsize -= rxpktsize * (num_pb / 2);
4733 		rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
4734 		for (; i < (num_pb / 2); i++)
4735 			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
4736 		/* fall through - configure remaining packet buffers */
4737 	case PBA_STRATEGY_EQUAL:
4738 		rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
4739 		for (; i < num_pb; i++)
4740 			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
4741 		break;
4742 	default:
4743 		break;
4744 	}
4745 
4746 	/* Only support an equally distributed Tx packet buffer strategy. */
4747 	txpktsize = IXGBE_TXPBSIZE_MAX / num_pb;
4748 	txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
4749 	for (i = 0; i < num_pb; i++) {
4750 		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
4751 		IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
4752 	}
4753 
4754 	/* Clear unused TCs, if any, to zero buffer size*/
4755 	for (; i < IXGBE_MAX_PB; i++) {
4756 		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
4757 		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
4758 		IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
4759 	}
4760 }
4761 
4762 /**
4763  * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo
4764  * @hw: pointer to the hardware structure
4765  *
4766  * The 82599 and x540 MACs can experience issues if TX work is still pending
4767  * when a reset occurs.  This function prevents this by flushing the PCIe
4768  * buffers on the system.
4769  **/
4770 void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
4771 {
4772 	u32 gcr_ext, hlreg0, i, poll;
4773 	u16 value;
4774 
4775 	/*
4776 	 * If double reset is not requested then all transactions should
4777 	 * already be clear and as such there is no work to do
4778 	 */
4779 	if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED))
4780 		return;
4781 
4782 	/*
4783 	 * Set loopback enable to prevent any transmits from being sent
4784 	 * should the link come up.  This assumes that the RXCTRL.RXEN bit
4785 	 * has already been cleared.
4786 	 */
4787 	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4788 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);
4789 
4790 	/* Wait for a last completion before clearing buffers */
4791 	IXGBE_WRITE_FLUSH(hw);
4792 	msec_delay(3);
4793 
4794 	/*
4795 	 * Before proceeding, make sure that the PCIe block does not have
4796 	 * transactions pending.
4797 	 */
4798 	poll = ixgbe_pcie_timeout_poll(hw);
4799 	for (i = 0; i < poll; i++) {
4800 		usec_delay(100);
4801 		value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS);
4802 		if (IXGBE_REMOVED(hw->hw_addr))
4803 			goto out;
4804 		if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
4805 			goto out;
4806 	}
4807 
4808 out:
4809 	/* initiate cleaning flow for buffers in the PCIe transaction layer */
4810 	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
4811 	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
4812 			gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR);
4813 
4814 	/* Flush all writes and allow 20usec for all transactions to clear */
4815 	IXGBE_WRITE_FLUSH(hw);
4816 	usec_delay(20);
4817 
4818 	/* restore previous register values */
4819 	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
4820 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
4821 }
4822 
4823 /**
4824  *  ixgbe_bypass_rw_generic - Bit bang data into by_pass FW
4825  *
4826  *  @hw: pointer to hardware structure
4827  *  @cmd: Command we send to the FW
4828  *  @status: The reply from the FW
4829  *
4830  *  Bit-bangs the cmd to the by_pass FW status points to what is returned.
4831  **/
4832 #define IXGBE_BYPASS_BB_WAIT 1
4833 s32 ixgbe_bypass_rw_generic(struct ixgbe_hw *hw, u32 cmd, u32 *status)
4834 {
4835 	int i;
4836 	u32 sck, sdi, sdo, dir_sck, dir_sdi, dir_sdo;
4837 	u32 esdp;
4838 
4839 	if (!status)
4840 		return IXGBE_ERR_PARAM;
4841 
4842 	*status = 0;
4843 
4844 	/* SDP vary by MAC type */
4845 	switch (hw->mac.type) {
4846 	case ixgbe_mac_82599EB:
4847 		sck = IXGBE_ESDP_SDP7;
4848 		sdi = IXGBE_ESDP_SDP0;
4849 		sdo = IXGBE_ESDP_SDP6;
4850 		dir_sck = IXGBE_ESDP_SDP7_DIR;
4851 		dir_sdi = IXGBE_ESDP_SDP0_DIR;
4852 		dir_sdo = IXGBE_ESDP_SDP6_DIR;
4853 		break;
4854 	case ixgbe_mac_X540:
4855 		sck = IXGBE_ESDP_SDP2;
4856 		sdi = IXGBE_ESDP_SDP0;
4857 		sdo = IXGBE_ESDP_SDP1;
4858 		dir_sck = IXGBE_ESDP_SDP2_DIR;
4859 		dir_sdi = IXGBE_ESDP_SDP0_DIR;
4860 		dir_sdo = IXGBE_ESDP_SDP1_DIR;
4861 		break;
4862 	default:
4863 		return IXGBE_ERR_DEVICE_NOT_SUPPORTED;
4864 	}
4865 
4866 	/* Set SDP pins direction */
4867 	esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
4868 	esdp |= dir_sck;	/* SCK as output */
4869 	esdp |= dir_sdi;	/* SDI as output */
4870 	esdp &= ~dir_sdo;	/* SDO as input */
4871 	esdp |= sck;
4872 	esdp |= sdi;
4873 	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
4874 	IXGBE_WRITE_FLUSH(hw);
4875 	msec_delay(IXGBE_BYPASS_BB_WAIT);
4876 
4877 	/* Generate start condition */
4878 	esdp &= ~sdi;
4879 	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
4880 	IXGBE_WRITE_FLUSH(hw);
4881 	msec_delay(IXGBE_BYPASS_BB_WAIT);
4882 
4883 	esdp &= ~sck;
4884 	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
4885 	IXGBE_WRITE_FLUSH(hw);
4886 	msec_delay(IXGBE_BYPASS_BB_WAIT);
4887 
4888 	/* Clock out the new control word and clock in the status */
4889 	for (i = 0; i < 32; i++) {
4890 		if ((cmd >> (31 - i)) & 0x01) {
4891 			esdp |= sdi;
4892 			IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
4893 		} else {
4894 			esdp &= ~sdi;
4895 			IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
4896 		}
4897 		IXGBE_WRITE_FLUSH(hw);
4898 		msec_delay(IXGBE_BYPASS_BB_WAIT);
4899 
4900 		esdp |= sck;
4901 		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
4902 		IXGBE_WRITE_FLUSH(hw);
4903 		msec_delay(IXGBE_BYPASS_BB_WAIT);
4904 
4905 		esdp &= ~sck;
4906 		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
4907 		IXGBE_WRITE_FLUSH(hw);
4908 		msec_delay(IXGBE_BYPASS_BB_WAIT);
4909 
4910 		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
4911 		if (esdp & sdo)
4912 			*status = (*status << 1) | 0x01;
4913 		else
4914 			*status = (*status << 1) | 0x00;
4915 		msec_delay(IXGBE_BYPASS_BB_WAIT);
4916 	}
4917 
4918 	/* stop condition */
4919 	esdp |= sck;
4920 	esdp &= ~sdi;
4921 	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
4922 	IXGBE_WRITE_FLUSH(hw);
4923 	msec_delay(IXGBE_BYPASS_BB_WAIT);
4924 
4925 	esdp |= sdi;
4926 	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
4927 	IXGBE_WRITE_FLUSH(hw);
4928 
4929 	/* set the page bits to match the cmd that the status it belongs to */
4930 	*status = (*status & 0x3fffffff) | (cmd & 0xc0000000);
4931 
4932 	return IXGBE_SUCCESS;
4933 }
4934 
4935 /**
4936  * ixgbe_bypass_valid_rd_generic - Verify valid return from bit-bang.
4937  *
4938  * If we send a write we can't be sure it took until we can read back
4939  * that same register.  It can be a problem as some of the feilds may
4940  * for valid reasons change inbetween the time wrote the register and
4941  * we read it again to verify.  So this function check everything we
4942  * can check and then assumes it worked.
4943  *
4944  * @u32 in_reg - The register cmd for the bit-bang read.
4945  * @u32 out_reg - The register returned from a bit-bang read.
4946  **/
4947 bool ixgbe_bypass_valid_rd_generic(u32 in_reg, u32 out_reg)
4948 {
4949 	u32 mask;
4950 
4951 	/* Page must match for all control pages */
4952 	if ((in_reg & BYPASS_PAGE_M) != (out_reg & BYPASS_PAGE_M))
4953 		return FALSE;
4954 
4955 	switch (in_reg & BYPASS_PAGE_M) {
4956 	case BYPASS_PAGE_CTL0:
4957 		/* All the following can't change since the last write
4958 		 *  - All the event actions
4959 		 *  - The timeout value
4960 		 */
4961 		mask = BYPASS_AUX_ON_M | BYPASS_MAIN_ON_M |
4962 		       BYPASS_MAIN_OFF_M | BYPASS_AUX_OFF_M |
4963 		       BYPASS_WDTIMEOUT_M |
4964 		       BYPASS_WDT_VALUE_M;
4965 		if ((out_reg & mask) != (in_reg & mask))
4966 			return FALSE;
4967 
4968 		/* 0x0 is never a valid value for bypass status */
4969 		if (!(out_reg & BYPASS_STATUS_OFF_M))
4970 			return FALSE;
4971 		break;
4972 	case BYPASS_PAGE_CTL1:
4973 		/* All the following can't change since the last write
4974 		 *  - time valid bit
4975 		 *  - time we last sent
4976 		 */
4977 		mask = BYPASS_CTL1_VALID_M | BYPASS_CTL1_TIME_M;
4978 		if ((out_reg & mask) != (in_reg & mask))
4979 			return FALSE;
4980 		break;
4981 	case BYPASS_PAGE_CTL2:
4982 		/* All we can check in this page is control number
4983 		 * which is already done above.
4984 		 */
4985 		break;
4986 	}
4987 
4988 	/* We are as sure as we can be return TRUE */
4989 	return TRUE;
4990 }
4991 
4992 /**
4993  *  ixgbe_bypass_set_generic - Set a bypass field in the FW CTRL Regiter.
4994  *
4995  *  @hw: pointer to hardware structure
4996  *  @cmd: The control word we are setting.
4997  *  @event: The event we are setting in the FW.  This also happens to
4998  *	    be the mask for the event we are setting (handy)
4999  *  @action: The action we set the event to in the FW. This is in a
5000  *	     bit field that happens to be what we want to put in
5001  *	     the event spot (also handy)
5002  **/
5003 s32 ixgbe_bypass_set_generic(struct ixgbe_hw *hw, u32 ctrl, u32 event,
5004 			     u32 action)
5005 {
5006 	u32 by_ctl = 0;
5007 	u32 cmd, verify;
5008 	u32 count = 0;
5009 
5010 	/* Get current values */
5011 	cmd = ctrl;	/* just reading only need control number */
5012 	if (ixgbe_bypass_rw_generic(hw, cmd, &by_ctl))
5013 		return IXGBE_ERR_INVALID_ARGUMENT;
5014 
5015 	/* Set to new action */
5016 	cmd = (by_ctl & ~event) | BYPASS_WE | action;
5017 	if (ixgbe_bypass_rw_generic(hw, cmd, &by_ctl))
5018 		return IXGBE_ERR_INVALID_ARGUMENT;
5019 
5020 	/* Page 0 force a FW eeprom write which is slow so verify */
5021 	if ((cmd & BYPASS_PAGE_M) == BYPASS_PAGE_CTL0) {
5022 		verify = BYPASS_PAGE_CTL0;
5023 		do {
5024 			if (count++ > 5)
5025 				return IXGBE_BYPASS_FW_WRITE_FAILURE;
5026 
5027 			if (ixgbe_bypass_rw_generic(hw, verify, &by_ctl))
5028 				return IXGBE_ERR_INVALID_ARGUMENT;
5029 		} while (!ixgbe_bypass_valid_rd_generic(cmd, by_ctl));
5030 	} else {
5031 		/* We have give the FW time for the write to stick */
5032 		msec_delay(100);
5033 	}
5034 
5035 	return IXGBE_SUCCESS;
5036 }
5037 
5038 /**
5039  *  ixgbe_bypass_rd_eep_generic - Read the bypass FW eeprom addres.
5040  *
5041  *  @hw: pointer to hardware structure
5042  *  @addr: The bypass eeprom address to read.
5043  *  @value: The 8b of data at the address above.
5044  **/
5045 s32 ixgbe_bypass_rd_eep_generic(struct ixgbe_hw *hw, u32 addr, u8 *value)
5046 {
5047 	u32 cmd;
5048 	u32 status;
5049 
5050 
5051 	/* send the request */
5052 	cmd = BYPASS_PAGE_CTL2 | BYPASS_WE;
5053 	cmd |= (addr << BYPASS_CTL2_OFFSET_SHIFT) & BYPASS_CTL2_OFFSET_M;
5054 	if (ixgbe_bypass_rw_generic(hw, cmd, &status))
5055 		return IXGBE_ERR_INVALID_ARGUMENT;
5056 
5057 	/* We have give the FW time for the write to stick */
5058 	msec_delay(100);
5059 
5060 	/* now read the results */
5061 	cmd &= ~BYPASS_WE;
5062 	if (ixgbe_bypass_rw_generic(hw, cmd, &status))
5063 		return IXGBE_ERR_INVALID_ARGUMENT;
5064 
5065 	*value = status & BYPASS_CTL2_DATA_M;
5066 
5067 	return IXGBE_SUCCESS;
5068 }
5069 
5070 
5071 /**
5072  * ixgbe_dcb_get_rtrup2tc_generic - read rtrup2tc reg
5073  * @hw: pointer to hardware structure
5074  * @map: pointer to u8 arr for returning map
5075  *
5076  * Read the rtrup2tc HW register and resolve its content into map
5077  **/
5078 void ixgbe_dcb_get_rtrup2tc_generic(struct ixgbe_hw *hw, u8 *map)
5079 {
5080 	u32 reg, i;
5081 
5082 	reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
5083 	for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
5084 		map[i] = IXGBE_RTRUP2TC_UP_MASK &
5085 			(reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT));
5086 	return;
5087 }
5088 
5089 void ixgbe_disable_rx_generic(struct ixgbe_hw *hw)
5090 {
5091 	u32 pfdtxgswc;
5092 	u32 rxctrl;
5093 
5094 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
5095 	if (rxctrl & IXGBE_RXCTRL_RXEN) {
5096 		if (hw->mac.type != ixgbe_mac_82598EB) {
5097 			pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
5098 			if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
5099 				pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
5100 				IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
5101 				hw->mac.set_lben = TRUE;
5102 			} else {
5103 				hw->mac.set_lben = FALSE;
5104 			}
5105 		}
5106 		rxctrl &= ~IXGBE_RXCTRL_RXEN;
5107 		IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
5108 	}
5109 }
5110 
5111 void ixgbe_enable_rx_generic(struct ixgbe_hw *hw)
5112 {
5113 	u32 pfdtxgswc;
5114 	u32 rxctrl;
5115 
5116 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
5117 	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, (rxctrl | IXGBE_RXCTRL_RXEN));
5118 
5119 	if (hw->mac.type != ixgbe_mac_82598EB) {
5120 		if (hw->mac.set_lben) {
5121 			pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
5122 			pfdtxgswc |= IXGBE_PFDTXGSWC_VT_LBEN;
5123 			IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
5124 			hw->mac.set_lben = FALSE;
5125 		}
5126 	}
5127 }
5128 
5129 /**
5130  * ixgbe_mng_present - returns TRUE when management capability is present
5131  * @hw: pointer to hardware structure
5132  */
5133 bool ixgbe_mng_present(struct ixgbe_hw *hw)
5134 {
5135 	u32 fwsm;
5136 
5137 	if (hw->mac.type < ixgbe_mac_82599EB)
5138 		return FALSE;
5139 
5140 	fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw));
5141 	fwsm &= IXGBE_FWSM_MODE_MASK;
5142 	return fwsm == IXGBE_FWSM_FW_MODE_PT;
5143 }
5144 
5145 /**
5146  * ixgbe_mng_enabled - Is the manageability engine enabled?
5147  * @hw: pointer to hardware structure
5148  *
5149  * Returns TRUE if the manageability engine is enabled.
5150  **/
5151 bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
5152 {
5153 	u32 fwsm, manc, factps;
5154 
5155 	fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw));
5156 	if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT)
5157 		return FALSE;
5158 
5159 	manc = IXGBE_READ_REG(hw, IXGBE_MANC);
5160 	if (!(manc & IXGBE_MANC_RCV_TCO_EN))
5161 		return FALSE;
5162 
5163 	if (hw->mac.type <= ixgbe_mac_X540) {
5164 		factps = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw));
5165 		if (factps & IXGBE_FACTPS_MNGCG)
5166 			return FALSE;
5167 	}
5168 
5169 	return TRUE;
5170 }
5171 
5172 /**
5173  *  ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
5174  *  @hw: pointer to hardware structure
5175  *  @speed: new link speed
5176  *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
5177  *
5178  *  Set the link speed in the MAC and/or PHY register and restarts link.
5179  **/
5180 s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
5181 					  ixgbe_link_speed speed,
5182 					  bool autoneg_wait_to_complete)
5183 {
5184 	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
5185 	ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
5186 	s32 status = IXGBE_SUCCESS;
5187 	u32 speedcnt = 0;
5188 	u32 i = 0;
5189 	bool autoneg, link_up = FALSE;
5190 
5191 	DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");
5192 
5193 	/* Mask off requested but non-supported speeds */
5194 	status = ixgbe_get_link_capabilities(hw, &link_speed, &autoneg);
5195 	if (status != IXGBE_SUCCESS)
5196 		return status;
5197 
5198 	speed &= link_speed;
5199 
5200 	/* Try each speed one by one, highest priority first.  We do this in
5201 	 * software because 10Gb fiber doesn't support speed autonegotiation.
5202 	 */
5203 	if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
5204 		speedcnt++;
5205 		highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
5206 
5207 		/* Set the module link speed */
5208 		switch (hw->phy.media_type) {
5209 		case ixgbe_media_type_fiber_fixed:
5210 		case ixgbe_media_type_fiber:
5211 			ixgbe_set_rate_select_speed(hw,
5212 						    IXGBE_LINK_SPEED_10GB_FULL);
5213 			break;
5214 		case ixgbe_media_type_fiber_qsfp:
5215 			/* QSFP module automatically detects MAC link speed */
5216 			break;
5217 		default:
5218 			DEBUGOUT("Unexpected media type.\n");
5219 			break;
5220 		}
5221 
5222 		/* Allow module to change analog characteristics (1G->10G) */
5223 		msec_delay(40);
5224 
5225 		status = ixgbe_setup_mac_link(hw,
5226 					      IXGBE_LINK_SPEED_10GB_FULL,
5227 					      autoneg_wait_to_complete);
5228 		if (status != IXGBE_SUCCESS)
5229 			return status;
5230 
5231 		/* Flap the Tx laser if it has not already been done */
5232 		ixgbe_flap_tx_laser(hw);
5233 
5234 		/* Wait for the controller to acquire link.  Per IEEE 802.3ap,
5235 		 * Section 73.10.2, we may have to wait up to 500ms if KR is
5236 		 * attempted.  82599 uses the same timing for 10g SFI.
5237 		 */
5238 		for (i = 0; i < 5; i++) {
5239 			/* Wait for the link partner to also set speed */
5240 			msec_delay(100);
5241 
5242 			/* If we have link, just jump out */
5243 			status = ixgbe_check_link(hw, &link_speed,
5244 						  &link_up, FALSE);
5245 			if (status != IXGBE_SUCCESS)
5246 				return status;
5247 
5248 			if (link_up)
5249 				goto out;
5250 		}
5251 	}
5252 
5253 	if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
5254 		speedcnt++;
5255 		if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
5256 			highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
5257 
5258 		/* Set the module link speed */
5259 		switch (hw->phy.media_type) {
5260 		case ixgbe_media_type_fiber_fixed:
5261 		case ixgbe_media_type_fiber:
5262 			ixgbe_set_rate_select_speed(hw,
5263 						    IXGBE_LINK_SPEED_1GB_FULL);
5264 			break;
5265 		case ixgbe_media_type_fiber_qsfp:
5266 			/* QSFP module automatically detects link speed */
5267 			break;
5268 		default:
5269 			DEBUGOUT("Unexpected media type.\n");
5270 			break;
5271 		}
5272 
5273 		/* Allow module to change analog characteristics (10G->1G) */
5274 		msec_delay(40);
5275 
5276 		status = ixgbe_setup_mac_link(hw,
5277 					      IXGBE_LINK_SPEED_1GB_FULL,
5278 					      autoneg_wait_to_complete);
5279 		if (status != IXGBE_SUCCESS)
5280 			return status;
5281 
5282 		/* Flap the Tx laser if it has not already been done */
5283 		ixgbe_flap_tx_laser(hw);
5284 
5285 		/* Wait for the link partner to also set speed */
5286 		msec_delay(100);
5287 
5288 		/* If we have link, just jump out */
5289 		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
5290 		if (status != IXGBE_SUCCESS)
5291 			return status;
5292 
5293 		if (link_up)
5294 			goto out;
5295 	}
5296 
5297 	/* We didn't get link.  Configure back to the highest speed we tried,
5298 	 * (if there was more than one).  We call ourselves back with just the
5299 	 * single highest speed that the user requested.
5300 	 */
5301 	if (speedcnt > 1)
5302 		status = ixgbe_setup_mac_link_multispeed_fiber(hw,
5303 						      highest_link_speed,
5304 						      autoneg_wait_to_complete);
5305 
5306 out:
5307 	/* Set autoneg_advertised value based on input link speed */
5308 	hw->phy.autoneg_advertised = 0;
5309 
5310 	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
5311 		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
5312 
5313 	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
5314 		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
5315 
5316 	return status;
5317 }
5318 
5319 /**
5320  *  ixgbe_set_soft_rate_select_speed - Set module link speed
5321  *  @hw: pointer to hardware structure
5322  *  @speed: link speed to set
5323  *
5324  *  Set module link speed via the soft rate select.
5325  */
5326 void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw,
5327 					ixgbe_link_speed speed)
5328 {
5329 	s32 status;
5330 	u8 rs, eeprom_data;
5331 
5332 	switch (speed) {
5333 	case IXGBE_LINK_SPEED_10GB_FULL:
5334 		/* one bit mask same as setting on */
5335 		rs = IXGBE_SFF_SOFT_RS_SELECT_10G;
5336 		break;
5337 	case IXGBE_LINK_SPEED_1GB_FULL:
5338 		rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
5339 		break;
5340 	default:
5341 		DEBUGOUT("Invalid fixed module speed\n");
5342 		return;
5343 	}
5344 
5345 	/* Set RS0 */
5346 	status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
5347 					   IXGBE_I2C_EEPROM_DEV_ADDR2,
5348 					   &eeprom_data);
5349 	if (status) {
5350 		DEBUGOUT("Failed to read Rx Rate Select RS0\n");
5351 		goto out;
5352 	}
5353 
5354 	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
5355 
5356 	status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
5357 					    IXGBE_I2C_EEPROM_DEV_ADDR2,
5358 					    eeprom_data);
5359 	if (status) {
5360 		DEBUGOUT("Failed to write Rx Rate Select RS0\n");
5361 		goto out;
5362 	}
5363 
5364 	/* Set RS1 */
5365 	status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
5366 					   IXGBE_I2C_EEPROM_DEV_ADDR2,
5367 					   &eeprom_data);
5368 	if (status) {
5369 		DEBUGOUT("Failed to read Rx Rate Select RS1\n");
5370 		goto out;
5371 	}
5372 
5373 	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
5374 
5375 	status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
5376 					    IXGBE_I2C_EEPROM_DEV_ADDR2,
5377 					    eeprom_data);
5378 	if (status) {
5379 		DEBUGOUT("Failed to write Rx Rate Select RS1\n");
5380 		goto out;
5381 	}
5382 out:
5383 	return;
5384 }
5385