xref: /freebsd/sys/dev/ixgbe/ixgbe_common.c (revision cdc58367265a2bd6e8f913db2bdc591699ee229f)
1 /******************************************************************************
2 
3   Copyright (c) 2001-2014, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 #include "ixgbe_common.h"
36 #include "ixgbe_phy.h"
37 #include "ixgbe_dcb.h"
38 #include "ixgbe_dcb_82599.h"
39 #include "ixgbe_api.h"
40 
41 static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
42 static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
43 static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
44 static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
45 static void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
46 static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
47 					u16 count);
48 static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
49 static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
50 static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
51 static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
52 
53 static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
54 static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
55 					 u16 *san_mac_offset);
56 static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
57 					     u16 words, u16 *data);
58 static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
59 					      u16 words, u16 *data);
60 static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
61 						 u16 offset);
62 
/**
 *  ixgbe_init_ops_generic - Inits function ptrs
 *  @hw: pointer to the hardware structure
 *
 *  Initialize the function pointers.  Ops left NULL here must be filled in
 *  by a MAC-specific init (e.g. 82598/82599/X540) before they are called.
 **/
s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
{
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	struct ixgbe_mac_info *mac = &hw->mac;
	u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC);

	DEBUGFUNC("ixgbe_init_ops_generic");

	/* EEPROM */
	eeprom->ops.init_params = ixgbe_init_eeprom_params_generic;
	/* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */
	if (eec & IXGBE_EEC_PRES) {
		eeprom->ops.read = ixgbe_read_eerd_generic;
		eeprom->ops.read_buffer = ixgbe_read_eerd_buffer_generic;
	} else {
		eeprom->ops.read = ixgbe_read_eeprom_bit_bang_generic;
		eeprom->ops.read_buffer =
				 ixgbe_read_eeprom_buffer_bit_bang_generic;
	}
	/* Writes always use the bit-bang path regardless of EEC_PRES */
	eeprom->ops.write = ixgbe_write_eeprom_generic;
	eeprom->ops.write_buffer = ixgbe_write_eeprom_buffer_bit_bang_generic;
	eeprom->ops.validate_checksum =
				      ixgbe_validate_eeprom_checksum_generic;
	eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_generic;
	eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_generic;

	/* MAC */
	mac->ops.init_hw = ixgbe_init_hw_generic;
	mac->ops.reset_hw = NULL;		/* MAC-specific */
	mac->ops.start_hw = ixgbe_start_hw_generic;
	mac->ops.clear_hw_cntrs = ixgbe_clear_hw_cntrs_generic;
	mac->ops.get_media_type = NULL;		/* MAC-specific */
	mac->ops.get_supported_physical_layer = NULL;	/* MAC-specific */
	mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_generic;
	mac->ops.get_mac_addr = ixgbe_get_mac_addr_generic;
	mac->ops.stop_adapter = ixgbe_stop_adapter_generic;
	mac->ops.get_bus_info = ixgbe_get_bus_info_generic;
	mac->ops.set_lan_id = ixgbe_set_lan_id_multi_port_pcie;
	mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync;
	mac->ops.release_swfw_sync = ixgbe_release_swfw_sync;
	mac->ops.prot_autoc_read = prot_autoc_read_generic;
	mac->ops.prot_autoc_write = prot_autoc_write_generic;

	/* LEDs */
	mac->ops.led_on = ixgbe_led_on_generic;
	mac->ops.led_off = ixgbe_led_off_generic;
	mac->ops.blink_led_start = ixgbe_blink_led_start_generic;
	mac->ops.blink_led_stop = ixgbe_blink_led_stop_generic;

	/* RAR, Multicast, VLAN */
	mac->ops.set_rar = ixgbe_set_rar_generic;
	mac->ops.clear_rar = ixgbe_clear_rar_generic;
	mac->ops.insert_mac_addr = NULL;	/* MAC-specific */
	mac->ops.set_vmdq = NULL;		/* MAC-specific */
	mac->ops.clear_vmdq = NULL;		/* MAC-specific */
	mac->ops.init_rx_addrs = ixgbe_init_rx_addrs_generic;
	mac->ops.update_uc_addr_list = ixgbe_update_uc_addr_list_generic;
	mac->ops.update_mc_addr_list = ixgbe_update_mc_addr_list_generic;
	mac->ops.enable_mc = ixgbe_enable_mc_generic;
	mac->ops.disable_mc = ixgbe_disable_mc_generic;
	mac->ops.clear_vfta = NULL;		/* MAC-specific */
	mac->ops.set_vfta = NULL;		/* MAC-specific */
	mac->ops.set_vlvf = NULL;		/* MAC-specific */
	mac->ops.init_uta_tables = NULL;	/* MAC-specific */
	mac->ops.enable_rx = ixgbe_enable_rx_generic;
	mac->ops.disable_rx = ixgbe_disable_rx_generic;

	/* Flow Control */
	mac->ops.fc_enable = ixgbe_fc_enable_generic;
	mac->ops.setup_fc = ixgbe_setup_fc_generic;

	/* Link - all MAC-specific, filled in by derived init */
	mac->ops.get_link_capabilities = NULL;
	mac->ops.setup_link = NULL;
	mac->ops.check_link = NULL;
	mac->ops.dmac_config = NULL;
	mac->ops.dmac_update_tcs = NULL;
	mac->ops.dmac_config_tcs = NULL;

	return IXGBE_SUCCESS;
}
150 
151 /**
152  * ixgbe_device_supports_autoneg_fc - Check if device supports autonegotiation
153  * of flow control
154  * @hw: pointer to hardware structure
155  *
156  * This function returns TRUE if the device supports flow control
157  * autonegotiation, and FALSE if it does not.
158  *
159  **/
160 bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
161 {
162 	bool supported = FALSE;
163 	ixgbe_link_speed speed;
164 	bool link_up;
165 
166 	DEBUGFUNC("ixgbe_device_supports_autoneg_fc");
167 
168 	switch (hw->phy.media_type) {
169 	case ixgbe_media_type_fiber_fixed:
170 	case ixgbe_media_type_fiber_qsfp:
171 	case ixgbe_media_type_fiber:
172 		hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
173 		/* if link is down, assume supported */
174 		if (link_up)
175 			supported = speed == IXGBE_LINK_SPEED_1GB_FULL ?
176 				TRUE : FALSE;
177 		else
178 			supported = TRUE;
179 		break;
180 	case ixgbe_media_type_backplane:
181 		supported = TRUE;
182 		break;
183 	case ixgbe_media_type_copper:
184 		/* only some copper devices support flow control autoneg */
185 		switch (hw->device_id) {
186 		case IXGBE_DEV_ID_82599_T3_LOM:
187 		case IXGBE_DEV_ID_X540T:
188 		case IXGBE_DEV_ID_X540T1:
189 		case IXGBE_DEV_ID_X540_BYPASS:
190 		case IXGBE_DEV_ID_X550T:
191 			supported = TRUE;
192 			break;
193 		default:
194 			supported = FALSE;
195 		}
196 	default:
197 		break;
198 	}
199 
200 	ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
201 		      "Device %x does not support flow control autoneg",
202 		      hw->device_id);
203 	return supported;
204 }
205 
/**
 *  ixgbe_setup_fc_generic - Set up flow control
 *  @hw: pointer to hardware structure
 *
 *  Called at init time to set up flow control.  Programs the pause
 *  advertisement appropriate to the media type (PCS1GANA for fiber/backplane,
 *  AUTOC for backplane, clause-37 PHY advertisement for copper) from
 *  hw->fc.requested_mode, then restarts autonegotiation.
 *
 *  Returns IXGBE_SUCCESS, IXGBE_ERR_INVALID_LINK_SETTINGS (rx_pause in
 *  strict IEEE mode), IXGBE_ERR_CONFIG (bad requested_mode), or an error
 *  propagated from the protected AUTOC read/write.
 **/
s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_SUCCESS;
	u32 reg = 0, reg_bp = 0;	/* PCS1GANA / AUTOC shadow copies */
	u16 reg_cu = 0;			/* copper PHY autoneg advertisement */
	bool locked = FALSE;		/* whether AUTOC RMW lock is held */

	DEBUGFUNC("ixgbe_setup_fc_generic");

	/* Validate the requested mode */
	if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
		ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
			   "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
		goto out;
	}

	/*
	 * 10gig parts do not have a word in the EEPROM to determine the
	 * default flow control setting, so we explicitly set it to full.
	 */
	if (hw->fc.requested_mode == ixgbe_fc_default)
		hw->fc.requested_mode = ixgbe_fc_full;

	/*
	 * Set up the 1G and 10G flow control advertisement registers so the
	 * HW will be able to do fc autoneg once the cable is plugged in.  If
	 * we link at 10G, the 1G advertisement is harmless and vice versa.
	 */
	switch (hw->phy.media_type) {
	case ixgbe_media_type_backplane:
		/* some MAC's need RMW protection on AUTOC */
		ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &reg_bp);
		if (ret_val != IXGBE_SUCCESS)
			goto out;

		/* only backplane uses autoc so fall through */
	case ixgbe_media_type_fiber_fixed:
	case ixgbe_media_type_fiber_qsfp:
	case ixgbe_media_type_fiber:
		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);

		break;
	case ixgbe_media_type_copper:
		hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
				     IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg_cu);
		break;
	default:
		break;
	}

	/*
	 * The possible values of fc.requested_mode are:
	 * 0: Flow control is completely disabled
	 * 1: Rx flow control is enabled (we can receive pause frames,
	 *    but not send pause frames).
	 * 2: Tx flow control is enabled (we can send pause frames but
	 *    we do not support receiving pause frames).
	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
	 * other: Invalid.
	 */
	switch (hw->fc.requested_mode) {
	case ixgbe_fc_none:
		/* Flow control completely disabled by software override. */
		reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
		if (hw->phy.media_type == ixgbe_media_type_backplane)
			reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
				    IXGBE_AUTOC_ASM_PAUSE);
		else if (hw->phy.media_type == ixgbe_media_type_copper)
			reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
		break;
	case ixgbe_fc_tx_pause:
		/*
		 * Tx Flow control is enabled, and Rx Flow control is
		 * disabled by software override.
		 */
		reg |= IXGBE_PCS1GANA_ASM_PAUSE;
		reg &= ~IXGBE_PCS1GANA_SYM_PAUSE;
		if (hw->phy.media_type == ixgbe_media_type_backplane) {
			reg_bp |= IXGBE_AUTOC_ASM_PAUSE;
			reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE;
		} else if (hw->phy.media_type == ixgbe_media_type_copper) {
			reg_cu |= IXGBE_TAF_ASM_PAUSE;
			reg_cu &= ~IXGBE_TAF_SYM_PAUSE;
		}
		break;
	case ixgbe_fc_rx_pause:
		/*
		 * Rx Flow control is enabled and Tx Flow control is
		 * disabled by software override. Since there really
		 * isn't a way to advertise that we are capable of RX
		 * Pause ONLY, we will advertise that we support both
		 * symmetric and asymmetric Rx PAUSE, as such we fall
		 * through to the fc_full statement.  Later, we will
		 * disable the adapter's ability to send PAUSE frames.
		 */
	case ixgbe_fc_full:
		/* Flow control (both Rx and Tx) is enabled by SW override. */
		reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE;
		if (hw->phy.media_type == ixgbe_media_type_backplane)
			reg_bp |= IXGBE_AUTOC_SYM_PAUSE |
				  IXGBE_AUTOC_ASM_PAUSE;
		else if (hw->phy.media_type == ixgbe_media_type_copper)
			reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
		break;
	default:
		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
			     "Flow control param set incorrectly\n");
		ret_val = IXGBE_ERR_CONFIG;
		goto out;
		break;
	}

	if (hw->mac.type < ixgbe_mac_X540) {
		/*
		 * Enable auto-negotiation between the MAC & PHY;
		 * the MAC will advertise clause 37 flow control.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);

		/* Disable AN timeout */
		if (hw->fc.strict_ieee)
			reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;

		IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
		DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
	}

	/*
	 * AUTOC restart handles negotiation of 1G and 10G on backplane
	 * and copper. There is no need to set the PCS1GCTL register.
	 *
	 */
	if (hw->phy.media_type == ixgbe_media_type_backplane) {
		reg_bp |= IXGBE_AUTOC_AN_RESTART;
		ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked);
		if (ret_val)
			goto out;
	} else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
		    (ixgbe_device_supports_autoneg_fc(hw))) {
		hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu);
	}

	DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
out:
	return ret_val;
}
361 
362 /**
363  *  ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
364  *  @hw: pointer to hardware structure
365  *
366  *  Starts the hardware by filling the bus info structure and media type, clears
367  *  all on chip counters, initializes receive address registers, multicast
368  *  table, VLAN filter table, calls routine to set up link and flow control
369  *  settings, and leaves transmit and receive units disabled and uninitialized
370  **/
371 s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
372 {
373 	s32 ret_val;
374 	u32 ctrl_ext;
375 
376 	DEBUGFUNC("ixgbe_start_hw_generic");
377 
378 	/* Set the media type */
379 	hw->phy.media_type = hw->mac.ops.get_media_type(hw);
380 
381 	/* PHY ops initialization must be done in reset_hw() */
382 
383 	/* Clear the VLAN filter table */
384 	hw->mac.ops.clear_vfta(hw);
385 
386 	/* Clear statistics registers */
387 	hw->mac.ops.clear_hw_cntrs(hw);
388 
389 	/* Set No Snoop Disable */
390 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
391 	ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
392 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
393 	IXGBE_WRITE_FLUSH(hw);
394 
395 	/* Setup flow control */
396 	ret_val = ixgbe_setup_fc(hw);
397 	if (ret_val != IXGBE_SUCCESS)
398 		goto out;
399 
400 	/* Clear adapter stopped flag */
401 	hw->adapter_stopped = FALSE;
402 
403 out:
404 	return ret_val;
405 }
406 
407 /**
408  *  ixgbe_start_hw_gen2 - Init sequence for common device family
409  *  @hw: pointer to hw structure
410  *
411  * Performs the init sequence common to the second generation
412  * of 10 GbE devices.
413  * Devices in the second generation:
414  *     82599
415  *     X540
416  **/
417 s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
418 {
419 	u32 i;
420 	u32 regval;
421 
422 	/* Clear the rate limiters */
423 	for (i = 0; i < hw->mac.max_tx_queues; i++) {
424 		IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
425 		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
426 	}
427 	IXGBE_WRITE_FLUSH(hw);
428 
429 	/* Disable relaxed ordering */
430 	for (i = 0; i < hw->mac.max_tx_queues; i++) {
431 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
432 		regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
433 		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
434 	}
435 
436 	for (i = 0; i < hw->mac.max_rx_queues; i++) {
437 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
438 		regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
439 			    IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
440 		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
441 	}
442 
443 	return IXGBE_SUCCESS;
444 }
445 
446 /**
447  *  ixgbe_init_hw_generic - Generic hardware initialization
448  *  @hw: pointer to hardware structure
449  *
450  *  Initialize the hardware by resetting the hardware, filling the bus info
451  *  structure and media type, clears all on chip counters, initializes receive
452  *  address registers, multicast table, VLAN filter table, calls routine to set
453  *  up link and flow control settings, and leaves transmit and receive units
454  *  disabled and uninitialized
455  **/
456 s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
457 {
458 	s32 status;
459 
460 	DEBUGFUNC("ixgbe_init_hw_generic");
461 
462 	/* Reset the hardware */
463 	status = hw->mac.ops.reset_hw(hw);
464 
465 	if (status == IXGBE_SUCCESS) {
466 		/* Start the HW */
467 		status = hw->mac.ops.start_hw(hw);
468 	}
469 
470 	return status;
471 }
472 
/**
 *  ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
 *  @hw: pointer to hardware structure
 *
 *  Clears all hardware statistics counters by reading them from the hardware
 *  Statistics counters are clear on read.  The register set read differs by
 *  MAC generation (82598 vs 82599-and-later names for the pause counters).
 **/
s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
{
	u16 i = 0;	/* loop index; also reused as scratch for PHY reads below */

	DEBUGFUNC("ixgbe_clear_hw_cntrs_generic");

	/* Error counters */
	IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	IXGBE_READ_REG(hw, IXGBE_ERRBC);
	IXGBE_READ_REG(hw, IXGBE_MSPDC);
	for (i = 0; i < 8; i++)
		IXGBE_READ_REG(hw, IXGBE_MPC(i));

	IXGBE_READ_REG(hw, IXGBE_MLFC);
	IXGBE_READ_REG(hw, IXGBE_MRFC);
	IXGBE_READ_REG(hw, IXGBE_RLEC);
	/* Link-level (global) XON/XOFF pause counters */
	IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	if (hw->mac.type >= ixgbe_mac_82599EB) {
		IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	} else {
		IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
	}

	/* Per-priority XON/XOFF pause counters (8 traffic classes) */
	for (i = 0; i < 8; i++) {
		IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
		IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
			IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
		} else {
			IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
			IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
		}
	}
	if (hw->mac.type >= ixgbe_mac_82599EB)
		for (i = 0; i < 8; i++)
			IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
	/* Rx size-bucket and good packet/byte counters */
	IXGBE_READ_REG(hw, IXGBE_PRC64);
	IXGBE_READ_REG(hw, IXGBE_PRC127);
	IXGBE_READ_REG(hw, IXGBE_PRC255);
	IXGBE_READ_REG(hw, IXGBE_PRC511);
	IXGBE_READ_REG(hw, IXGBE_PRC1023);
	IXGBE_READ_REG(hw, IXGBE_PRC1522);
	IXGBE_READ_REG(hw, IXGBE_GPRC);
	IXGBE_READ_REG(hw, IXGBE_BPRC);
	IXGBE_READ_REG(hw, IXGBE_MPRC);
	IXGBE_READ_REG(hw, IXGBE_GPTC);
	IXGBE_READ_REG(hw, IXGBE_GORCL);
	IXGBE_READ_REG(hw, IXGBE_GORCH);
	IXGBE_READ_REG(hw, IXGBE_GOTCL);
	IXGBE_READ_REG(hw, IXGBE_GOTCH);
	/* Rx no-buffer counters exist only on 82598 */
	if (hw->mac.type == ixgbe_mac_82598EB)
		for (i = 0; i < 8; i++)
			IXGBE_READ_REG(hw, IXGBE_RNBC(i));
	IXGBE_READ_REG(hw, IXGBE_RUC);
	IXGBE_READ_REG(hw, IXGBE_RFC);
	IXGBE_READ_REG(hw, IXGBE_ROC);
	IXGBE_READ_REG(hw, IXGBE_RJC);
	/* Management traffic counters */
	IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	IXGBE_READ_REG(hw, IXGBE_TORL);
	IXGBE_READ_REG(hw, IXGBE_TORH);
	IXGBE_READ_REG(hw, IXGBE_TPR);
	IXGBE_READ_REG(hw, IXGBE_TPT);
	/* Tx size-bucket counters */
	IXGBE_READ_REG(hw, IXGBE_PTC64);
	IXGBE_READ_REG(hw, IXGBE_PTC127);
	IXGBE_READ_REG(hw, IXGBE_PTC255);
	IXGBE_READ_REG(hw, IXGBE_PTC511);
	IXGBE_READ_REG(hw, IXGBE_PTC1023);
	IXGBE_READ_REG(hw, IXGBE_PTC1522);
	IXGBE_READ_REG(hw, IXGBE_MPTC);
	IXGBE_READ_REG(hw, IXGBE_BPTC);
	/* Per-queue packet/byte counters (16 queues) */
	for (i = 0; i < 16; i++) {
		IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
			IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		} else {
			IXGBE_READ_REG(hw, IXGBE_QBRC(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC(i));
		}
	}

	/*
	 * X540/X550 keep additional clear-on-read error counters in the
	 * internal PHY; read them via MDIO.  `i` is reused here only as a
	 * throwaway destination for the read values.
	 */
	if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) {
		if (hw->phy.id == 0)
			ixgbe_identify_phy(hw);
		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
		hw->phy.ops.read_reg(hw, IXGBE_LDPCECL,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
		hw->phy.ops.read_reg(hw, IXGBE_LDPCECH,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
	}

	return IXGBE_SUCCESS;
}
586 
/**
 *  ixgbe_read_pba_string_generic - Reads part number string from EEPROM
 *  @hw: pointer to hardware structure
 *  @pba_num: stores the part number string from the EEPROM
 *  @pba_num_size: part number string buffer length
 *
 *  Reads the part number string from the EEPROM.  Handles both the modern
 *  format (word 0 is a pointer-guard value and word 1 points to a length-
 *  prefixed string block) and the legacy format (the two words are raw hex
 *  digits decoded into an "XXXXXX-0XX" style string).
 *
 *  Returns IXGBE_SUCCESS, IXGBE_ERR_INVALID_ARGUMENT (NULL buffer),
 *  IXGBE_ERR_NO_SPACE (buffer too small), IXGBE_ERR_PBA_SECTION (bad
 *  length word), or an EEPROM read error.
 **/
s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
				  u32 pba_num_size)
{
	s32 ret_val;
	u16 data;
	u16 pba_ptr;
	u16 offset;
	u16 length;

	DEBUGFUNC("ixgbe_read_pba_string_generic");

	if (pba_num == NULL) {
		DEBUGOUT("PBA string buffer was null\n");
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		return ret_val;
	}

	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		return ret_val;
	}

	/*
	 * if data is not ptr guard the PBA must be in legacy format which
	 * means pba_ptr is actually our second data word for the PBA number
	 * and we can decode it into an ascii string
	 */
	if (data != IXGBE_PBANUM_PTR_GUARD) {
		DEBUGOUT("NVM PBA number is not stored as string\n");

		/* we will need 11 characters to store the PBA */
		if (pba_num_size < 11) {
			DEBUGOUT("PBA string buffer too small\n");
			return IXGBE_ERR_NO_SPACE;
		}

		/* extract hex string from data and pba_ptr */
		pba_num[0] = (data >> 12) & 0xF;
		pba_num[1] = (data >> 8) & 0xF;
		pba_num[2] = (data >> 4) & 0xF;
		pba_num[3] = data & 0xF;
		pba_num[4] = (pba_ptr >> 12) & 0xF;
		pba_num[5] = (pba_ptr >> 8) & 0xF;
		pba_num[6] = '-';
		/* deliberate literal 0: the hex-conversion loop below turns
		 * this nibble value into the ASCII character '0' */
		pba_num[7] = 0;
		pba_num[8] = (pba_ptr >> 4) & 0xF;
		pba_num[9] = pba_ptr & 0xF;

		/* put a null character on the end of our string */
		pba_num[10] = '\0';

		/* switch all the data but the '-' to hex char; '-' (0x2D)
		 * is >= 0x10 so both branches skip it */
		for (offset = 0; offset < 10; offset++) {
			if (pba_num[offset] < 0xA)
				pba_num[offset] += '0';
			else if (pba_num[offset] < 0x10)
				pba_num[offset] += 'A' - 0xA;
		}

		return IXGBE_SUCCESS;
	}

	/* Modern format: first word of the block is the length in words */
	ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		return ret_val;
	}

	if (length == 0xFFFF || length == 0) {
		DEBUGOUT("NVM PBA number section invalid length\n");
		return IXGBE_ERR_PBA_SECTION;
	}

	/* check if pba_num buffer is big enough: 2 chars per data word,
	 * minus one word used by the length, plus the terminator */
	if (pba_num_size  < (((u32)length * 2) - 1)) {
		DEBUGOUT("PBA string buffer too small\n");
		return IXGBE_ERR_NO_SPACE;
	}

	/* trim pba length from start of string */
	pba_ptr++;
	length--;

	/* each EEPROM word holds two ASCII characters, high byte first */
	for (offset = 0; offset < length; offset++) {
		ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
		if (ret_val) {
			DEBUGOUT("NVM Read Error\n");
			return ret_val;
		}
		pba_num[offset * 2] = (u8)(data >> 8);
		pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
	}
	pba_num[offset * 2] = '\0';

	return IXGBE_SUCCESS;
}
697 
698 /**
699  *  ixgbe_read_pba_num_generic - Reads part number from EEPROM
700  *  @hw: pointer to hardware structure
701  *  @pba_num: stores the part number from the EEPROM
702  *
703  *  Reads the part number from the EEPROM.
704  **/
705 s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num)
706 {
707 	s32 ret_val;
708 	u16 data;
709 
710 	DEBUGFUNC("ixgbe_read_pba_num_generic");
711 
712 	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
713 	if (ret_val) {
714 		DEBUGOUT("NVM Read Error\n");
715 		return ret_val;
716 	} else if (data == IXGBE_PBANUM_PTR_GUARD) {
717 		DEBUGOUT("NVM Not supported\n");
718 		return IXGBE_NOT_IMPLEMENTED;
719 	}
720 	*pba_num = (u32)(data << 16);
721 
722 	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data);
723 	if (ret_val) {
724 		DEBUGOUT("NVM Read Error\n");
725 		return ret_val;
726 	}
727 	*pba_num |= data;
728 
729 	return IXGBE_SUCCESS;
730 }
731 
/**
 *  ixgbe_read_pba_raw
 *  @hw: pointer to the HW structure
 *  @eeprom_buf: optional pointer to EEPROM image
 *  @eeprom_buf_size: size of EEPROM image in words
 *  @max_pba_block_size: PBA block size limit
 *  @pba: pointer to output PBA structure
 *
 *  Reads PBA from EEPROM image when eeprom_buf is not NULL.
 *  Reads PBA from physical EEPROM device when eeprom_buf is NULL.
 *
 *  Fills pba->word[0..1]; when the PBA is in block (pointer-guard) format,
 *  also copies the referenced block into pba->pba_block, which the caller
 *  must have sized to at least max_pba_block_size words.
 *
 *  Returns IXGBE_SUCCESS, IXGBE_ERR_PARAM (NULL pba, missing pba_block,
 *  image too small, or block larger than max_pba_block_size), or an
 *  EEPROM read error.
 **/
s32 ixgbe_read_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
		       u32 eeprom_buf_size, u16 max_pba_block_size,
		       struct ixgbe_pba *pba)
{
	s32 ret_val;
	u16 pba_block_size;

	if (pba == NULL)
		return IXGBE_ERR_PARAM;

	/* Fetch the two PBA words from the device or the supplied image */
	if (eeprom_buf == NULL) {
		ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
						     &pba->word[0]);
		if (ret_val)
			return ret_val;
	} else {
		if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
			pba->word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
			pba->word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
		} else {
			return IXGBE_ERR_PARAM;
		}
	}

	/* Pointer-guard means word[1] points at a length-prefixed block */
	if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
		if (pba->pba_block == NULL)
			return IXGBE_ERR_PARAM;

		ret_val = ixgbe_get_pba_block_size(hw, eeprom_buf,
						   eeprom_buf_size,
						   &pba_block_size);
		if (ret_val)
			return ret_val;

		/* Guard against overflowing the caller's block buffer */
		if (pba_block_size > max_pba_block_size)
			return IXGBE_ERR_PARAM;

		if (eeprom_buf == NULL) {
			ret_val = hw->eeprom.ops.read_buffer(hw, pba->word[1],
							     pba_block_size,
							     pba->pba_block);
			if (ret_val)
				return ret_val;
		} else {
			/* Ensure the whole block lies inside the image */
			if (eeprom_buf_size > (u32)(pba->word[1] +
					      pba_block_size)) {
				memcpy(pba->pba_block,
				       &eeprom_buf[pba->word[1]],
				       pba_block_size * sizeof(u16));
			} else {
				return IXGBE_ERR_PARAM;
			}
		}
	}

	return IXGBE_SUCCESS;
}
801 
/**
 *  ixgbe_write_pba_raw
 *  @hw: pointer to the HW structure
 *  @eeprom_buf: optional pointer to EEPROM image
 *  @eeprom_buf_size: size of EEPROM image in words
 *  @pba: pointer to PBA structure
 *
 *  Writes PBA to EEPROM image when eeprom_buf is not NULL.
 *  Writes PBA to physical EEPROM device when eeprom_buf is NULL.
 *
 *  Mirror of ixgbe_read_pba_raw(): writes pba->word[0..1] and, when the
 *  PBA is in block format, writes the block whose word count is stored in
 *  pba->pba_block[0].
 *
 *  Returns IXGBE_SUCCESS, IXGBE_ERR_PARAM (NULL pba, missing pba_block,
 *  or image too small), or an EEPROM write error.
 **/
s32 ixgbe_write_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
			u32 eeprom_buf_size, struct ixgbe_pba *pba)
{
	s32 ret_val;

	if (pba == NULL)
		return IXGBE_ERR_PARAM;

	/* Store the two PBA words to the device or the supplied image */
	if (eeprom_buf == NULL) {
		ret_val = hw->eeprom.ops.write_buffer(hw, IXGBE_PBANUM0_PTR, 2,
						      &pba->word[0]);
		if (ret_val)
			return ret_val;
	} else {
		if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
			eeprom_buf[IXGBE_PBANUM0_PTR] = pba->word[0];
			eeprom_buf[IXGBE_PBANUM1_PTR] = pba->word[1];
		} else {
			return IXGBE_ERR_PARAM;
		}
	}

	/* Pointer-guard means word[1] points at a length-prefixed block;
	 * pba_block[0] is that length in words (including itself) */
	if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
		if (pba->pba_block == NULL)
			return IXGBE_ERR_PARAM;

		if (eeprom_buf == NULL) {
			ret_val = hw->eeprom.ops.write_buffer(hw, pba->word[1],
							      pba->pba_block[0],
							      pba->pba_block);
			if (ret_val)
				return ret_val;
		} else {
			/* Ensure the whole block fits inside the image */
			if (eeprom_buf_size > (u32)(pba->word[1] +
					      pba->pba_block[0])) {
				memcpy(&eeprom_buf[pba->word[1]],
				       pba->pba_block,
				       pba->pba_block[0] * sizeof(u16));
			} else {
				return IXGBE_ERR_PARAM;
			}
		}
	}

	return IXGBE_SUCCESS;
}
859 
/**
 *  ixgbe_get_pba_block_size
 *  @hw: pointer to the HW structure
 *  @eeprom_buf: optional pointer to EEPROM image
 *  @eeprom_buf_size: size of EEPROM image in words
 *  @pba_block_size: pointer to output variable (may be NULL)
 *
 *  Returns the size of the PBA block in words. Function operates on EEPROM
 *  image if the eeprom_buf pointer is not NULL otherwise it accesses physical
 *  EEPROM device.  Legacy-format PBAs have no block, so 0 is reported.
 *
 *  Returns IXGBE_SUCCESS, IXGBE_ERR_PARAM (image too small),
 *  IXGBE_ERR_PBA_SECTION (length word is 0 or 0xFFFF), or an EEPROM
 *  read error.
 **/
s32 ixgbe_get_pba_block_size(struct ixgbe_hw *hw, u16 *eeprom_buf,
			     u32 eeprom_buf_size, u16 *pba_block_size)
{
	s32 ret_val;
	u16 pba_word[2];
	u16 length;

	DEBUGFUNC("ixgbe_get_pba_block_size");

	/* Fetch the two PBA words from the device or the supplied image */
	if (eeprom_buf == NULL) {
		ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
						     &pba_word[0]);
		if (ret_val)
			return ret_val;
	} else {
		if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
			pba_word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
			pba_word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
		} else {
			return IXGBE_ERR_PARAM;
		}
	}

	if (pba_word[0] == IXGBE_PBANUM_PTR_GUARD) {
		/* Block format: first word at the pointer is the length */
		if (eeprom_buf == NULL) {
			ret_val = hw->eeprom.ops.read(hw, pba_word[1] + 0,
						      &length);
			if (ret_val)
				return ret_val;
		} else {
			if (eeprom_buf_size > pba_word[1])
				length = eeprom_buf[pba_word[1] + 0];
			else
				return IXGBE_ERR_PARAM;
		}

		/* 0xFFFF is erased EEPROM; 0 is nonsensical */
		if (length == 0xFFFF || length == 0)
			return IXGBE_ERR_PBA_SECTION;
	} else {
		/* PBA number in legacy format, there is no PBA Block. */
		length = 0;
	}

	if (pba_block_size != NULL)
		*pba_block_size = length;

	return IXGBE_SUCCESS;
}
920 
921 /**
922  *  ixgbe_get_mac_addr_generic - Generic get MAC address
923  *  @hw: pointer to hardware structure
924  *  @mac_addr: Adapter MAC address
925  *
926  *  Reads the adapter's MAC address from first Receive Address Register (RAR0)
927  *  A reset of the adapter must be performed prior to calling this function
928  *  in order for the MAC address to have been loaded from the EEPROM into RAR0
929  **/
930 s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
931 {
932 	u32 rar_high;
933 	u32 rar_low;
934 	u16 i;
935 
936 	DEBUGFUNC("ixgbe_get_mac_addr_generic");
937 
938 	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
939 	rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));
940 
941 	for (i = 0; i < 4; i++)
942 		mac_addr[i] = (u8)(rar_low >> (i*8));
943 
944 	for (i = 0; i < 2; i++)
945 		mac_addr[i+4] = (u8)(rar_high >> (i*8));
946 
947 	return IXGBE_SUCCESS;
948 }
949 
950 /**
951  *  ixgbe_set_pci_config_data_generic - Generic store PCI bus info
952  *  @hw: pointer to hardware structure
953  *  @link_status: the link status returned by the PCI config space
954  *
955  *  Stores the PCI bus info (speed, width, type) within the ixgbe_hw structure
956  **/
957 void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw, u16 link_status)
958 {
959 	struct ixgbe_mac_info *mac = &hw->mac;
960 
961 	if (hw->bus.type == ixgbe_bus_type_unknown)
962 		hw->bus.type = ixgbe_bus_type_pci_express;
963 
964 	switch (link_status & IXGBE_PCI_LINK_WIDTH) {
965 	case IXGBE_PCI_LINK_WIDTH_1:
966 		hw->bus.width = ixgbe_bus_width_pcie_x1;
967 		break;
968 	case IXGBE_PCI_LINK_WIDTH_2:
969 		hw->bus.width = ixgbe_bus_width_pcie_x2;
970 		break;
971 	case IXGBE_PCI_LINK_WIDTH_4:
972 		hw->bus.width = ixgbe_bus_width_pcie_x4;
973 		break;
974 	case IXGBE_PCI_LINK_WIDTH_8:
975 		hw->bus.width = ixgbe_bus_width_pcie_x8;
976 		break;
977 	default:
978 		hw->bus.width = ixgbe_bus_width_unknown;
979 		break;
980 	}
981 
982 	switch (link_status & IXGBE_PCI_LINK_SPEED) {
983 	case IXGBE_PCI_LINK_SPEED_2500:
984 		hw->bus.speed = ixgbe_bus_speed_2500;
985 		break;
986 	case IXGBE_PCI_LINK_SPEED_5000:
987 		hw->bus.speed = ixgbe_bus_speed_5000;
988 		break;
989 	case IXGBE_PCI_LINK_SPEED_8000:
990 		hw->bus.speed = ixgbe_bus_speed_8000;
991 		break;
992 	default:
993 		hw->bus.speed = ixgbe_bus_speed_unknown;
994 		break;
995 	}
996 
997 	mac->ops.set_lan_id(hw);
998 }
999 
1000 /**
1001  *  ixgbe_get_bus_info_generic - Generic set PCI bus info
1002  *  @hw: pointer to hardware structure
1003  *
1004  *  Gets the PCI bus info (speed, width, type) then calls helper function to
1005  *  store this data within the ixgbe_hw structure.
1006  **/
1007 s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
1008 {
1009 	u16 link_status;
1010 
1011 	DEBUGFUNC("ixgbe_get_bus_info_generic");
1012 
1013 	/* Get the negotiated link width and speed from PCI config space */
1014 	link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);
1015 
1016 	ixgbe_set_pci_config_data_generic(hw, link_status);
1017 
1018 	return IXGBE_SUCCESS;
1019 }
1020 
1021 /**
1022  *  ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
1023  *  @hw: pointer to the HW structure
1024  *
1025  *  Determines the LAN function id by reading memory-mapped registers
1026  *  and swaps the port value if requested.
1027  **/
1028 void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
1029 {
1030 	struct ixgbe_bus_info *bus = &hw->bus;
1031 	u32 reg;
1032 
1033 	DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie");
1034 
1035 	reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
1036 	bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
1037 	bus->lan_id = bus->func;
1038 
1039 	/* check for a port swap */
1040 	reg = IXGBE_READ_REG(hw, IXGBE_FACTPS);
1041 	if (reg & IXGBE_FACTPS_LFS)
1042 		bus->func ^= 0x1;
1043 }
1044 
1045 /**
1046  *  ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
1047  *  @hw: pointer to hardware structure
1048  *
1049  *  Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
1050  *  disables transmit and receive units. The adapter_stopped flag is used by
1051  *  the shared code and drivers to determine if the adapter is in a stopped
1052  *  state and should not touch the hardware.
1053  **/
1054 s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
1055 {
1056 	u32 reg_val;
1057 	u16 i;
1058 
1059 	DEBUGFUNC("ixgbe_stop_adapter_generic");
1060 
1061 	/*
1062 	 * Set the adapter_stopped flag so other driver functions stop touching
1063 	 * the hardware
1064 	 */
1065 	hw->adapter_stopped = TRUE;
1066 
1067 	/* Disable the receive unit */
1068 	ixgbe_disable_rx(hw);
1069 
1070 	/* Clear interrupt mask to stop interrupts from being generated */
1071 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
1072 
1073 	/* Clear any pending interrupts, flush previous writes */
1074 	IXGBE_READ_REG(hw, IXGBE_EICR);
1075 
1076 	/* Disable the transmit unit.  Each queue must be disabled. */
1077 	for (i = 0; i < hw->mac.max_tx_queues; i++)
1078 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH);
1079 
1080 	/* Disable the receive unit by stopping each queue */
1081 	for (i = 0; i < hw->mac.max_rx_queues; i++) {
1082 		reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
1083 		reg_val &= ~IXGBE_RXDCTL_ENABLE;
1084 		reg_val |= IXGBE_RXDCTL_SWFLSH;
1085 		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
1086 	}
1087 
1088 	/* flush all queues disables */
1089 	IXGBE_WRITE_FLUSH(hw);
1090 	msec_delay(2);
1091 
1092 	/*
1093 	 * Prevent the PCI-E bus from from hanging by disabling PCI-E master
1094 	 * access and verify no pending requests
1095 	 */
1096 	return ixgbe_disable_pcie_master(hw);
1097 }
1098 
1099 /**
1100  *  ixgbe_led_on_generic - Turns on the software controllable LEDs.
1101  *  @hw: pointer to hardware structure
1102  *  @index: led number to turn on
1103  **/
1104 s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
1105 {
1106 	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1107 
1108 	DEBUGFUNC("ixgbe_led_on_generic");
1109 
1110 	/* To turn on the LED, set mode to ON. */
1111 	led_reg &= ~IXGBE_LED_MODE_MASK(index);
1112 	led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
1113 	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
1114 	IXGBE_WRITE_FLUSH(hw);
1115 
1116 	return IXGBE_SUCCESS;
1117 }
1118 
1119 /**
1120  *  ixgbe_led_off_generic - Turns off the software controllable LEDs.
1121  *  @hw: pointer to hardware structure
1122  *  @index: led number to turn off
1123  **/
1124 s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
1125 {
1126 	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1127 
1128 	DEBUGFUNC("ixgbe_led_off_generic");
1129 
1130 	/* To turn off the LED, set mode to OFF. */
1131 	led_reg &= ~IXGBE_LED_MODE_MASK(index);
1132 	led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
1133 	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
1134 	IXGBE_WRITE_FLUSH(hw);
1135 
1136 	return IXGBE_SUCCESS;
1137 }
1138 
1139 /**
1140  *  ixgbe_init_eeprom_params_generic - Initialize EEPROM params
1141  *  @hw: pointer to hardware structure
1142  *
1143  *  Initializes the EEPROM parameters ixgbe_eeprom_info within the
1144  *  ixgbe_hw struct in order to set up EEPROM access.
1145  **/
1146 s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
1147 {
1148 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
1149 	u32 eec;
1150 	u16 eeprom_size;
1151 
1152 	DEBUGFUNC("ixgbe_init_eeprom_params_generic");
1153 
1154 	if (eeprom->type == ixgbe_eeprom_uninitialized) {
1155 		eeprom->type = ixgbe_eeprom_none;
1156 		/* Set default semaphore delay to 10ms which is a well
1157 		 * tested value */
1158 		eeprom->semaphore_delay = 10;
1159 		/* Clear EEPROM page size, it will be initialized as needed */
1160 		eeprom->word_page_size = 0;
1161 
1162 		/*
1163 		 * Check for EEPROM present first.
1164 		 * If not present leave as none
1165 		 */
1166 		eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1167 		if (eec & IXGBE_EEC_PRES) {
1168 			eeprom->type = ixgbe_eeprom_spi;
1169 
1170 			/*
1171 			 * SPI EEPROM is assumed here.  This code would need to
1172 			 * change if a future EEPROM is not SPI.
1173 			 */
1174 			eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
1175 					    IXGBE_EEC_SIZE_SHIFT);
1176 			eeprom->word_size = 1 << (eeprom_size +
1177 					     IXGBE_EEPROM_WORD_SIZE_SHIFT);
1178 		}
1179 
1180 		if (eec & IXGBE_EEC_ADDR_SIZE)
1181 			eeprom->address_bits = 16;
1182 		else
1183 			eeprom->address_bits = 8;
1184 		DEBUGOUT3("Eeprom params: type = %d, size = %d, address bits: "
1185 			  "%d\n", eeprom->type, eeprom->word_size,
1186 			  eeprom->address_bits);
1187 	}
1188 
1189 	return IXGBE_SUCCESS;
1190 }
1191 
1192 /**
1193  *  ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang
1194  *  @hw: pointer to hardware structure
1195  *  @offset: offset within the EEPROM to write
1196  *  @words: number of word(s)
1197  *  @data: 16 bit word(s) to write to EEPROM
1198  *
1199  *  Reads 16 bit word(s) from EEPROM through bit-bang method
1200  **/
1201 s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1202 					       u16 words, u16 *data)
1203 {
1204 	s32 status = IXGBE_SUCCESS;
1205 	u16 i, count;
1206 
1207 	DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang_generic");
1208 
1209 	hw->eeprom.ops.init_params(hw);
1210 
1211 	if (words == 0) {
1212 		status = IXGBE_ERR_INVALID_ARGUMENT;
1213 		goto out;
1214 	}
1215 
1216 	if (offset + words > hw->eeprom.word_size) {
1217 		status = IXGBE_ERR_EEPROM;
1218 		goto out;
1219 	}
1220 
1221 	/*
1222 	 * The EEPROM page size cannot be queried from the chip. We do lazy
1223 	 * initialization. It is worth to do that when we write large buffer.
1224 	 */
1225 	if ((hw->eeprom.word_page_size == 0) &&
1226 	    (words > IXGBE_EEPROM_PAGE_SIZE_MAX))
1227 		ixgbe_detect_eeprom_page_size_generic(hw, offset);
1228 
1229 	/*
1230 	 * We cannot hold synchronization semaphores for too long
1231 	 * to avoid other entity starvation. However it is more efficient
1232 	 * to read in bursts than synchronizing access for each word.
1233 	 */
1234 	for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
1235 		count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
1236 			IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
1237 		status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i,
1238 							    count, &data[i]);
1239 
1240 		if (status != IXGBE_SUCCESS)
1241 			break;
1242 	}
1243 
1244 out:
1245 	return status;
1246 }
1247 
1248 /**
1249  *  ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM
1250  *  @hw: pointer to hardware structure
1251  *  @offset: offset within the EEPROM to be written to
1252  *  @words: number of word(s)
1253  *  @data: 16 bit word(s) to be written to the EEPROM
1254  *
1255  *  If ixgbe_eeprom_update_checksum is not called after this function, the
1256  *  EEPROM will most likely contain an invalid checksum.
1257  **/
1258 static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
1259 					      u16 words, u16 *data)
1260 {
1261 	s32 status;
1262 	u16 word;
1263 	u16 page_size;
1264 	u16 i;
1265 	u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;
1266 
1267 	DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang");
1268 
1269 	/* Prepare the EEPROM for writing  */
1270 	status = ixgbe_acquire_eeprom(hw);
1271 
1272 	if (status == IXGBE_SUCCESS) {
1273 		if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
1274 			ixgbe_release_eeprom(hw);
1275 			status = IXGBE_ERR_EEPROM;
1276 		}
1277 	}
1278 
1279 	if (status == IXGBE_SUCCESS) {
1280 		for (i = 0; i < words; i++) {
1281 			ixgbe_standby_eeprom(hw);
1282 
1283 			/*  Send the WRITE ENABLE command (8 bit opcode )  */
1284 			ixgbe_shift_out_eeprom_bits(hw,
1285 						   IXGBE_EEPROM_WREN_OPCODE_SPI,
1286 						   IXGBE_EEPROM_OPCODE_BITS);
1287 
1288 			ixgbe_standby_eeprom(hw);
1289 
1290 			/*
1291 			 * Some SPI eeproms use the 8th address bit embedded
1292 			 * in the opcode
1293 			 */
1294 			if ((hw->eeprom.address_bits == 8) &&
1295 			    ((offset + i) >= 128))
1296 				write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
1297 
1298 			/* Send the Write command (8-bit opcode + addr) */
1299 			ixgbe_shift_out_eeprom_bits(hw, write_opcode,
1300 						    IXGBE_EEPROM_OPCODE_BITS);
1301 			ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
1302 						    hw->eeprom.address_bits);
1303 
1304 			page_size = hw->eeprom.word_page_size;
1305 
1306 			/* Send the data in burst via SPI*/
1307 			do {
1308 				word = data[i];
1309 				word = (word >> 8) | (word << 8);
1310 				ixgbe_shift_out_eeprom_bits(hw, word, 16);
1311 
1312 				if (page_size == 0)
1313 					break;
1314 
1315 				/* do not wrap around page */
1316 				if (((offset + i) & (page_size - 1)) ==
1317 				    (page_size - 1))
1318 					break;
1319 			} while (++i < words);
1320 
1321 			ixgbe_standby_eeprom(hw);
1322 			msec_delay(10);
1323 		}
1324 		/* Done with writing - release the EEPROM */
1325 		ixgbe_release_eeprom(hw);
1326 	}
1327 
1328 	return status;
1329 }
1330 
1331 /**
1332  *  ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
1333  *  @hw: pointer to hardware structure
1334  *  @offset: offset within the EEPROM to be written to
1335  *  @data: 16 bit word to be written to the EEPROM
1336  *
1337  *  If ixgbe_eeprom_update_checksum is not called after this function, the
1338  *  EEPROM will most likely contain an invalid checksum.
1339  **/
1340 s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
1341 {
1342 	s32 status;
1343 
1344 	DEBUGFUNC("ixgbe_write_eeprom_generic");
1345 
1346 	hw->eeprom.ops.init_params(hw);
1347 
1348 	if (offset >= hw->eeprom.word_size) {
1349 		status = IXGBE_ERR_EEPROM;
1350 		goto out;
1351 	}
1352 
1353 	status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
1354 
1355 out:
1356 	return status;
1357 }
1358 
1359 /**
1360  *  ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang
1361  *  @hw: pointer to hardware structure
1362  *  @offset: offset within the EEPROM to be read
1363  *  @data: read 16 bit words(s) from EEPROM
1364  *  @words: number of word(s)
1365  *
1366  *  Reads 16 bit word(s) from EEPROM through bit-bang method
1367  **/
1368 s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1369 					      u16 words, u16 *data)
1370 {
1371 	s32 status = IXGBE_SUCCESS;
1372 	u16 i, count;
1373 
1374 	DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang_generic");
1375 
1376 	hw->eeprom.ops.init_params(hw);
1377 
1378 	if (words == 0) {
1379 		status = IXGBE_ERR_INVALID_ARGUMENT;
1380 		goto out;
1381 	}
1382 
1383 	if (offset + words > hw->eeprom.word_size) {
1384 		status = IXGBE_ERR_EEPROM;
1385 		goto out;
1386 	}
1387 
1388 	/*
1389 	 * We cannot hold synchronization semaphores for too long
1390 	 * to avoid other entity starvation. However it is more efficient
1391 	 * to read in bursts than synchronizing access for each word.
1392 	 */
1393 	for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
1394 		count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
1395 			IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
1396 
1397 		status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i,
1398 							   count, &data[i]);
1399 
1400 		if (status != IXGBE_SUCCESS)
1401 			break;
1402 	}
1403 
1404 out:
1405 	return status;
1406 }
1407 
1408 /**
1409  *  ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang
1410  *  @hw: pointer to hardware structure
1411  *  @offset: offset within the EEPROM to be read
1412  *  @words: number of word(s)
1413  *  @data: read 16 bit word(s) from EEPROM
1414  *
1415  *  Reads 16 bit word(s) from EEPROM through bit-bang method
1416  **/
1417 static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
1418 					     u16 words, u16 *data)
1419 {
1420 	s32 status;
1421 	u16 word_in;
1422 	u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
1423 	u16 i;
1424 
1425 	DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang");
1426 
1427 	/* Prepare the EEPROM for reading  */
1428 	status = ixgbe_acquire_eeprom(hw);
1429 
1430 	if (status == IXGBE_SUCCESS) {
1431 		if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
1432 			ixgbe_release_eeprom(hw);
1433 			status = IXGBE_ERR_EEPROM;
1434 		}
1435 	}
1436 
1437 	if (status == IXGBE_SUCCESS) {
1438 		for (i = 0; i < words; i++) {
1439 			ixgbe_standby_eeprom(hw);
1440 			/*
1441 			 * Some SPI eeproms use the 8th address bit embedded
1442 			 * in the opcode
1443 			 */
1444 			if ((hw->eeprom.address_bits == 8) &&
1445 			    ((offset + i) >= 128))
1446 				read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
1447 
1448 			/* Send the READ command (opcode + addr) */
1449 			ixgbe_shift_out_eeprom_bits(hw, read_opcode,
1450 						    IXGBE_EEPROM_OPCODE_BITS);
1451 			ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
1452 						    hw->eeprom.address_bits);
1453 
1454 			/* Read the data. */
1455 			word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
1456 			data[i] = (word_in >> 8) | (word_in << 8);
1457 		}
1458 
1459 		/* End this read operation */
1460 		ixgbe_release_eeprom(hw);
1461 	}
1462 
1463 	return status;
1464 }
1465 
1466 /**
1467  *  ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
1468  *  @hw: pointer to hardware structure
1469  *  @offset: offset within the EEPROM to be read
1470  *  @data: read 16 bit value from EEPROM
1471  *
1472  *  Reads 16 bit value from EEPROM through bit-bang method
1473  **/
1474 s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1475 				       u16 *data)
1476 {
1477 	s32 status;
1478 
1479 	DEBUGFUNC("ixgbe_read_eeprom_bit_bang_generic");
1480 
1481 	hw->eeprom.ops.init_params(hw);
1482 
1483 	if (offset >= hw->eeprom.word_size) {
1484 		status = IXGBE_ERR_EEPROM;
1485 		goto out;
1486 	}
1487 
1488 	status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1489 
1490 out:
1491 	return status;
1492 }
1493 
1494 /**
1495  *  ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD
1496  *  @hw: pointer to hardware structure
1497  *  @offset: offset of word in the EEPROM to read
1498  *  @words: number of word(s)
1499  *  @data: 16 bit word(s) from the EEPROM
1500  *
1501  *  Reads a 16 bit word(s) from the EEPROM using the EERD register.
1502  **/
1503 s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1504 				   u16 words, u16 *data)
1505 {
1506 	u32 eerd;
1507 	s32 status = IXGBE_SUCCESS;
1508 	u32 i;
1509 
1510 	DEBUGFUNC("ixgbe_read_eerd_buffer_generic");
1511 
1512 	hw->eeprom.ops.init_params(hw);
1513 
1514 	if (words == 0) {
1515 		status = IXGBE_ERR_INVALID_ARGUMENT;
1516 		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
1517 		goto out;
1518 	}
1519 
1520 	if (offset >= hw->eeprom.word_size) {
1521 		status = IXGBE_ERR_EEPROM;
1522 		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
1523 		goto out;
1524 	}
1525 
1526 	for (i = 0; i < words; i++) {
1527 		eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1528 		       IXGBE_EEPROM_RW_REG_START;
1529 
1530 		IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
1531 		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);
1532 
1533 		if (status == IXGBE_SUCCESS) {
1534 			data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
1535 				   IXGBE_EEPROM_RW_REG_DATA);
1536 		} else {
1537 			DEBUGOUT("Eeprom read timed out\n");
1538 			goto out;
1539 		}
1540 	}
1541 out:
1542 	return status;
1543 }
1544 
1545 /**
1546  *  ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size
1547  *  @hw: pointer to hardware structure
1548  *  @offset: offset within the EEPROM to be used as a scratch pad
1549  *
1550  *  Discover EEPROM page size by writing marching data at given offset.
1551  *  This function is called only when we are writing a new large buffer
1552  *  at given offset so the data would be overwritten anyway.
1553  **/
1554 static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
1555 						 u16 offset)
1556 {
1557 	u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
1558 	s32 status = IXGBE_SUCCESS;
1559 	u16 i;
1560 
1561 	DEBUGFUNC("ixgbe_detect_eeprom_page_size_generic");
1562 
1563 	for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
1564 		data[i] = i;
1565 
1566 	hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX;
1567 	status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset,
1568 					     IXGBE_EEPROM_PAGE_SIZE_MAX, data);
1569 	hw->eeprom.word_page_size = 0;
1570 	if (status != IXGBE_SUCCESS)
1571 		goto out;
1572 
1573 	status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1574 	if (status != IXGBE_SUCCESS)
1575 		goto out;
1576 
1577 	/*
1578 	 * When writing in burst more than the actual page size
1579 	 * EEPROM address wraps around current page.
1580 	 */
1581 	hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];
1582 
1583 	DEBUGOUT1("Detected EEPROM page size = %d words.",
1584 		  hw->eeprom.word_page_size);
1585 out:
1586 	return status;
1587 }
1588 
1589 /**
1590  *  ixgbe_read_eerd_generic - Read EEPROM word using EERD
1591  *  @hw: pointer to hardware structure
1592  *  @offset: offset of  word in the EEPROM to read
1593  *  @data: word read from the EEPROM
1594  *
1595  *  Reads a 16 bit word from the EEPROM using the EERD register.
1596  **/
1597 s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
1598 {
1599 	return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
1600 }
1601 
1602 /**
1603  *  ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR
1604  *  @hw: pointer to hardware structure
1605  *  @offset: offset of  word in the EEPROM to write
1606  *  @words: number of word(s)
1607  *  @data: word(s) write to the EEPROM
1608  *
1609  *  Write a 16 bit word(s) to the EEPROM using the EEWR register.
1610  **/
1611 s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1612 				    u16 words, u16 *data)
1613 {
1614 	u32 eewr;
1615 	s32 status = IXGBE_SUCCESS;
1616 	u16 i;
1617 
1618 	DEBUGFUNC("ixgbe_write_eewr_generic");
1619 
1620 	hw->eeprom.ops.init_params(hw);
1621 
1622 	if (words == 0) {
1623 		status = IXGBE_ERR_INVALID_ARGUMENT;
1624 		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
1625 		goto out;
1626 	}
1627 
1628 	if (offset >= hw->eeprom.word_size) {
1629 		status = IXGBE_ERR_EEPROM;
1630 		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
1631 		goto out;
1632 	}
1633 
1634 	for (i = 0; i < words; i++) {
1635 		eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1636 			(data[i] << IXGBE_EEPROM_RW_REG_DATA) |
1637 			IXGBE_EEPROM_RW_REG_START;
1638 
1639 		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1640 		if (status != IXGBE_SUCCESS) {
1641 			DEBUGOUT("Eeprom write EEWR timed out\n");
1642 			goto out;
1643 		}
1644 
1645 		IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
1646 
1647 		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1648 		if (status != IXGBE_SUCCESS) {
1649 			DEBUGOUT("Eeprom write EEWR timed out\n");
1650 			goto out;
1651 		}
1652 	}
1653 
1654 out:
1655 	return status;
1656 }
1657 
1658 /**
1659  *  ixgbe_write_eewr_generic - Write EEPROM word using EEWR
1660  *  @hw: pointer to hardware structure
1661  *  @offset: offset of  word in the EEPROM to write
1662  *  @data: word write to the EEPROM
1663  *
1664  *  Write a 16 bit word to the EEPROM using the EEWR register.
1665  **/
1666 s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
1667 {
1668 	return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data);
1669 }
1670 
1671 /**
1672  *  ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
1673  *  @hw: pointer to hardware structure
1674  *  @ee_reg: EEPROM flag for polling
1675  *
1676  *  Polls the status bit (bit 1) of the EERD or EEWR to determine when the
1677  *  read or write is done respectively.
1678  **/
1679 s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
1680 {
1681 	u32 i;
1682 	u32 reg;
1683 	s32 status = IXGBE_ERR_EEPROM;
1684 
1685 	DEBUGFUNC("ixgbe_poll_eerd_eewr_done");
1686 
1687 	for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
1688 		if (ee_reg == IXGBE_NVM_POLL_READ)
1689 			reg = IXGBE_READ_REG(hw, IXGBE_EERD);
1690 		else
1691 			reg = IXGBE_READ_REG(hw, IXGBE_EEWR);
1692 
1693 		if (reg & IXGBE_EEPROM_RW_REG_DONE) {
1694 			status = IXGBE_SUCCESS;
1695 			break;
1696 		}
1697 		usec_delay(5);
1698 	}
1699 
1700 	if (i == IXGBE_EERD_EEWR_ATTEMPTS)
1701 		ERROR_REPORT1(IXGBE_ERROR_POLLING,
1702 			     "EEPROM read/write done polling timed out");
1703 
1704 	return status;
1705 }
1706 
1707 /**
1708  *  ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
1709  *  @hw: pointer to hardware structure
1710  *
1711  *  Prepares EEPROM for access using bit-bang method. This function should
1712  *  be called before issuing a command to the EEPROM.
1713  **/
1714 static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
1715 {
1716 	s32 status = IXGBE_SUCCESS;
1717 	u32 eec;
1718 	u32 i;
1719 
1720 	DEBUGFUNC("ixgbe_acquire_eeprom");
1721 
1722 	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)
1723 	    != IXGBE_SUCCESS)
1724 		status = IXGBE_ERR_SWFW_SYNC;
1725 
1726 	if (status == IXGBE_SUCCESS) {
1727 		eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1728 
1729 		/* Request EEPROM Access */
1730 		eec |= IXGBE_EEC_REQ;
1731 		IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1732 
1733 		for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
1734 			eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1735 			if (eec & IXGBE_EEC_GNT)
1736 				break;
1737 			usec_delay(5);
1738 		}
1739 
1740 		/* Release if grant not acquired */
1741 		if (!(eec & IXGBE_EEC_GNT)) {
1742 			eec &= ~IXGBE_EEC_REQ;
1743 			IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1744 			DEBUGOUT("Could not acquire EEPROM grant\n");
1745 
1746 			hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1747 			status = IXGBE_ERR_EEPROM;
1748 		}
1749 
1750 		/* Setup EEPROM for Read/Write */
1751 		if (status == IXGBE_SUCCESS) {
1752 			/* Clear CS and SK */
1753 			eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
1754 			IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1755 			IXGBE_WRITE_FLUSH(hw);
1756 			usec_delay(1);
1757 		}
1758 	}
1759 	return status;
1760 }
1761 
1762 /**
1763  *  ixgbe_get_eeprom_semaphore - Get hardware semaphore
1764  *  @hw: pointer to hardware structure
1765  *
1766  *  Sets the hardware semaphores so EEPROM access can occur for bit-bang method
1767  **/
1768 static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
1769 {
1770 	s32 status = IXGBE_ERR_EEPROM;
1771 	u32 timeout = 2000;
1772 	u32 i;
1773 	u32 swsm;
1774 
1775 	DEBUGFUNC("ixgbe_get_eeprom_semaphore");
1776 
1777 
1778 	/* Get SMBI software semaphore between device drivers first */
1779 	for (i = 0; i < timeout; i++) {
1780 		/*
1781 		 * If the SMBI bit is 0 when we read it, then the bit will be
1782 		 * set and we have the semaphore
1783 		 */
1784 		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1785 		if (!(swsm & IXGBE_SWSM_SMBI)) {
1786 			status = IXGBE_SUCCESS;
1787 			break;
1788 		}
1789 		usec_delay(50);
1790 	}
1791 
1792 	if (i == timeout) {
1793 		DEBUGOUT("Driver can't access the Eeprom - SMBI Semaphore "
1794 			 "not granted.\n");
1795 		/*
1796 		 * this release is particularly important because our attempts
1797 		 * above to get the semaphore may have succeeded, and if there
1798 		 * was a timeout, we should unconditionally clear the semaphore
1799 		 * bits to free the driver to make progress
1800 		 */
1801 		ixgbe_release_eeprom_semaphore(hw);
1802 
1803 		usec_delay(50);
1804 		/*
1805 		 * one last try
1806 		 * If the SMBI bit is 0 when we read it, then the bit will be
1807 		 * set and we have the semaphore
1808 		 */
1809 		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1810 		if (!(swsm & IXGBE_SWSM_SMBI))
1811 			status = IXGBE_SUCCESS;
1812 	}
1813 
1814 	/* Now get the semaphore between SW/FW through the SWESMBI bit */
1815 	if (status == IXGBE_SUCCESS) {
1816 		for (i = 0; i < timeout; i++) {
1817 			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1818 
1819 			/* Set the SW EEPROM semaphore bit to request access */
1820 			swsm |= IXGBE_SWSM_SWESMBI;
1821 			IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
1822 
1823 			/*
1824 			 * If we set the bit successfully then we got the
1825 			 * semaphore.
1826 			 */
1827 			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1828 			if (swsm & IXGBE_SWSM_SWESMBI)
1829 				break;
1830 
1831 			usec_delay(50);
1832 		}
1833 
1834 		/*
1835 		 * Release semaphores and return error if SW EEPROM semaphore
1836 		 * was not granted because we don't have access to the EEPROM
1837 		 */
1838 		if (i >= timeout) {
1839 			ERROR_REPORT1(IXGBE_ERROR_POLLING,
1840 			    "SWESMBI Software EEPROM semaphore not granted.\n");
1841 			ixgbe_release_eeprom_semaphore(hw);
1842 			status = IXGBE_ERR_EEPROM;
1843 		}
1844 	} else {
1845 		ERROR_REPORT1(IXGBE_ERROR_POLLING,
1846 			     "Software semaphore SMBI between device drivers "
1847 			     "not granted.\n");
1848 	}
1849 
1850 	return status;
1851 }
1852 
1853 /**
1854  *  ixgbe_release_eeprom_semaphore - Release hardware semaphore
1855  *  @hw: pointer to hardware structure
1856  *
1857  *  This function clears hardware semaphore bits.
1858  **/
1859 static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
1860 {
1861 	u32 swsm;
1862 
1863 	DEBUGFUNC("ixgbe_release_eeprom_semaphore");
1864 
1865 	swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1866 
1867 	/* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
1868 	swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
1869 	IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
1870 	IXGBE_WRITE_FLUSH(hw);
1871 }
1872 
1873 /**
1874  *  ixgbe_ready_eeprom - Polls for EEPROM ready
1875  *  @hw: pointer to hardware structure
1876  **/
1877 static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
1878 {
1879 	s32 status = IXGBE_SUCCESS;
1880 	u16 i;
1881 	u8 spi_stat_reg;
1882 
1883 	DEBUGFUNC("ixgbe_ready_eeprom");
1884 
1885 	/*
1886 	 * Read "Status Register" repeatedly until the LSB is cleared.  The
1887 	 * EEPROM will signal that the command has been completed by clearing
1888 	 * bit 0 of the internal status register.  If it's not cleared within
1889 	 * 5 milliseconds, then error out.
1890 	 */
1891 	for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
1892 		ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
1893 					    IXGBE_EEPROM_OPCODE_BITS);
1894 		spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
1895 		if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
1896 			break;
1897 
1898 		usec_delay(5);
1899 		ixgbe_standby_eeprom(hw);
1900 	};
1901 
1902 	/*
1903 	 * On some parts, SPI write time could vary from 0-20mSec on 3.3V
1904 	 * devices (and only 0-5mSec on 5V devices)
1905 	 */
1906 	if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
1907 		DEBUGOUT("SPI EEPROM Status error\n");
1908 		status = IXGBE_ERR_EEPROM;
1909 	}
1910 
1911 	return status;
1912 }
1913 
1914 /**
1915  *  ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
1916  *  @hw: pointer to hardware structure
1917  **/
1918 static void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
1919 {
1920 	u32 eec;
1921 
1922 	DEBUGFUNC("ixgbe_standby_eeprom");
1923 
1924 	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1925 
1926 	/* Toggle CS to flush commands */
1927 	eec |= IXGBE_EEC_CS;
1928 	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1929 	IXGBE_WRITE_FLUSH(hw);
1930 	usec_delay(1);
1931 	eec &= ~IXGBE_EEC_CS;
1932 	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1933 	IXGBE_WRITE_FLUSH(hw);
1934 	usec_delay(1);
1935 }
1936 
1937 /**
1938  *  ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
1939  *  @hw: pointer to hardware structure
1940  *  @data: data to send to the EEPROM
1941  *  @count: number of bits to shift out
1942  **/
1943 static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
1944 					u16 count)
1945 {
1946 	u32 eec;
1947 	u32 mask;
1948 	u32 i;
1949 
1950 	DEBUGFUNC("ixgbe_shift_out_eeprom_bits");
1951 
1952 	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1953 
1954 	/*
1955 	 * Mask is used to shift "count" bits of "data" out to the EEPROM
1956 	 * one bit at a time.  Determine the starting bit based on count
1957 	 */
1958 	mask = 0x01 << (count - 1);
1959 
1960 	for (i = 0; i < count; i++) {
1961 		/*
1962 		 * A "1" is shifted out to the EEPROM by setting bit "DI" to a
1963 		 * "1", and then raising and then lowering the clock (the SK
1964 		 * bit controls the clock input to the EEPROM).  A "0" is
1965 		 * shifted out to the EEPROM by setting "DI" to "0" and then
1966 		 * raising and then lowering the clock.
1967 		 */
1968 		if (data & mask)
1969 			eec |= IXGBE_EEC_DI;
1970 		else
1971 			eec &= ~IXGBE_EEC_DI;
1972 
1973 		IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1974 		IXGBE_WRITE_FLUSH(hw);
1975 
1976 		usec_delay(1);
1977 
1978 		ixgbe_raise_eeprom_clk(hw, &eec);
1979 		ixgbe_lower_eeprom_clk(hw, &eec);
1980 
1981 		/*
1982 		 * Shift mask to signify next bit of data to shift in to the
1983 		 * EEPROM
1984 		 */
1985 		mask = mask >> 1;
1986 	};
1987 
1988 	/* We leave the "DI" bit set to "0" when we leave this routine. */
1989 	eec &= ~IXGBE_EEC_DI;
1990 	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1991 	IXGBE_WRITE_FLUSH(hw);
1992 }
1993 
1994 /**
1995  *  ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
1996  *  @hw: pointer to hardware structure
1997  **/
1998 static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
1999 {
2000 	u32 eec;
2001 	u32 i;
2002 	u16 data = 0;
2003 
2004 	DEBUGFUNC("ixgbe_shift_in_eeprom_bits");
2005 
2006 	/*
2007 	 * In order to read a register from the EEPROM, we need to shift
2008 	 * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
2009 	 * the clock input to the EEPROM (setting the SK bit), and then reading
2010 	 * the value of the "DO" bit.  During this "shifting in" process the
2011 	 * "DI" bit should always be clear.
2012 	 */
2013 	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
2014 
2015 	eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);
2016 
2017 	for (i = 0; i < count; i++) {
2018 		data = data << 1;
2019 		ixgbe_raise_eeprom_clk(hw, &eec);
2020 
2021 		eec = IXGBE_READ_REG(hw, IXGBE_EEC);
2022 
2023 		eec &= ~(IXGBE_EEC_DI);
2024 		if (eec & IXGBE_EEC_DO)
2025 			data |= 1;
2026 
2027 		ixgbe_lower_eeprom_clk(hw, &eec);
2028 	}
2029 
2030 	return data;
2031 }
2032 
2033 /**
2034  *  ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
2035  *  @hw: pointer to hardware structure
2036  *  @eec: EEC register's current value
2037  **/
2038 static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
2039 {
2040 	DEBUGFUNC("ixgbe_raise_eeprom_clk");
2041 
2042 	/*
2043 	 * Raise the clock input to the EEPROM
2044 	 * (setting the SK bit), then delay
2045 	 */
2046 	*eec = *eec | IXGBE_EEC_SK;
2047 	IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
2048 	IXGBE_WRITE_FLUSH(hw);
2049 	usec_delay(1);
2050 }
2051 
2052 /**
2053  *  ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
2054  *  @hw: pointer to hardware structure
2055  *  @eecd: EECD's current value
2056  **/
2057 static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
2058 {
2059 	DEBUGFUNC("ixgbe_lower_eeprom_clk");
2060 
2061 	/*
2062 	 * Lower the clock input to the EEPROM (clearing the SK bit), then
2063 	 * delay
2064 	 */
2065 	*eec = *eec & ~IXGBE_EEC_SK;
2066 	IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
2067 	IXGBE_WRITE_FLUSH(hw);
2068 	usec_delay(1);
2069 }
2070 
2071 /**
2072  *  ixgbe_release_eeprom - Release EEPROM, release semaphores
2073  *  @hw: pointer to hardware structure
2074  **/
2075 static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
2076 {
2077 	u32 eec;
2078 
2079 	DEBUGFUNC("ixgbe_release_eeprom");
2080 
2081 	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
2082 
2083 	eec |= IXGBE_EEC_CS;  /* Pull CS high */
2084 	eec &= ~IXGBE_EEC_SK; /* Lower SCK */
2085 
2086 	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
2087 	IXGBE_WRITE_FLUSH(hw);
2088 
2089 	usec_delay(1);
2090 
2091 	/* Stop requesting EEPROM access */
2092 	eec &= ~IXGBE_EEC_REQ;
2093 	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
2094 
2095 	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
2096 
2097 	/* Delay before attempt to obtain semaphore again to allow FW access */
2098 	msec_delay(hw->eeprom.semaphore_delay);
2099 }
2100 
2101 /**
2102  *  ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
2103  *  @hw: pointer to hardware structure
2104  *
2105  *  Returns a negative error code on error, or the 16-bit checksum
2106  **/
2107 s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
2108 {
2109 	u16 i;
2110 	u16 j;
2111 	u16 checksum = 0;
2112 	u16 length = 0;
2113 	u16 pointer = 0;
2114 	u16 word = 0;
2115 
2116 	DEBUGFUNC("ixgbe_calc_eeprom_checksum_generic");
2117 
2118 	/* Include 0x0-0x3F in the checksum */
2119 	for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
2120 		if (hw->eeprom.ops.read(hw, i, &word)) {
2121 			DEBUGOUT("EEPROM read failed\n");
2122 			return IXGBE_ERR_EEPROM;
2123 		}
2124 		checksum += word;
2125 	}
2126 
2127 	/* Include all data from pointers except for the fw pointer */
2128 	for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
2129 		if (hw->eeprom.ops.read(hw, i, &pointer)) {
2130 			DEBUGOUT("EEPROM read failed\n");
2131 			return IXGBE_ERR_EEPROM;
2132 		}
2133 
2134 		/* If the pointer seems invalid */
2135 		if (pointer == 0xFFFF || pointer == 0)
2136 			continue;
2137 
2138 		if (hw->eeprom.ops.read(hw, pointer, &length)) {
2139 			DEBUGOUT("EEPROM read failed\n");
2140 			return IXGBE_ERR_EEPROM;
2141 		}
2142 
2143 		if (length == 0xFFFF || length == 0)
2144 			continue;
2145 
2146 		for (j = pointer + 1; j <= pointer + length; j++) {
2147 			if (hw->eeprom.ops.read(hw, j, &word)) {
2148 				DEBUGOUT("EEPROM read failed\n");
2149 				return IXGBE_ERR_EEPROM;
2150 			}
2151 			checksum += word;
2152 		}
2153 	}
2154 
2155 	checksum = (u16)IXGBE_EEPROM_SUM - checksum;
2156 
2157 	return (s32)checksum;
2158 }
2159 
2160 /**
2161  *  ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
2162  *  @hw: pointer to hardware structure
2163  *  @checksum_val: calculated checksum
2164  *
2165  *  Performs checksum calculation and validates the EEPROM checksum.  If the
2166  *  caller does not need checksum_val, the value can be NULL.
2167  **/
2168 s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
2169 					   u16 *checksum_val)
2170 {
2171 	s32 status;
2172 	u16 checksum;
2173 	u16 read_checksum = 0;
2174 
2175 	DEBUGFUNC("ixgbe_validate_eeprom_checksum_generic");
2176 
2177 	/* Read the first word from the EEPROM. If this times out or fails, do
2178 	 * not continue or we could be in for a very long wait while every
2179 	 * EEPROM read fails
2180 	 */
2181 	status = hw->eeprom.ops.read(hw, 0, &checksum);
2182 	if (status) {
2183 		DEBUGOUT("EEPROM read failed\n");
2184 		return status;
2185 	}
2186 
2187 	status = hw->eeprom.ops.calc_checksum(hw);
2188 	if (status < 0)
2189 		return status;
2190 
2191 	checksum = (u16)(status & 0xffff);
2192 
2193 	status = hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
2194 	if (status) {
2195 		DEBUGOUT("EEPROM read failed\n");
2196 		return status;
2197 	}
2198 
2199 	/* Verify read checksum from EEPROM is the same as
2200 	 * calculated checksum
2201 	 */
2202 	if (read_checksum != checksum)
2203 		status = IXGBE_ERR_EEPROM_CHECKSUM;
2204 
2205 	/* If the user cares, return the calculated checksum */
2206 	if (checksum_val)
2207 		*checksum_val = checksum;
2208 
2209 	return status;
2210 }
2211 
2212 /**
2213  *  ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
2214  *  @hw: pointer to hardware structure
2215  **/
2216 s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
2217 {
2218 	s32 status;
2219 	u16 checksum;
2220 
2221 	DEBUGFUNC("ixgbe_update_eeprom_checksum_generic");
2222 
2223 	/* Read the first word from the EEPROM. If this times out or fails, do
2224 	 * not continue or we could be in for a very long wait while every
2225 	 * EEPROM read fails
2226 	 */
2227 	status = hw->eeprom.ops.read(hw, 0, &checksum);
2228 	if (status) {
2229 		DEBUGOUT("EEPROM read failed\n");
2230 		return status;
2231 	}
2232 
2233 	status = hw->eeprom.ops.calc_checksum(hw);
2234 	if (status < 0)
2235 		return status;
2236 
2237 	checksum = (u16)(status & 0xffff);
2238 
2239 	status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, checksum);
2240 
2241 	return status;
2242 }
2243 
2244 /**
2245  *  ixgbe_validate_mac_addr - Validate MAC address
2246  *  @mac_addr: pointer to MAC address.
2247  *
2248  *  Tests a MAC address to ensure it is a valid Individual Address
2249  **/
2250 s32 ixgbe_validate_mac_addr(u8 *mac_addr)
2251 {
2252 	s32 status = IXGBE_SUCCESS;
2253 
2254 	DEBUGFUNC("ixgbe_validate_mac_addr");
2255 
2256 	/* Make sure it is not a multicast address */
2257 	if (IXGBE_IS_MULTICAST(mac_addr)) {
2258 		DEBUGOUT("MAC address is multicast\n");
2259 		status = IXGBE_ERR_INVALID_MAC_ADDR;
2260 	/* Not a broadcast address */
2261 	} else if (IXGBE_IS_BROADCAST(mac_addr)) {
2262 		DEBUGOUT("MAC address is broadcast\n");
2263 		status = IXGBE_ERR_INVALID_MAC_ADDR;
2264 	/* Reject the zero address */
2265 	} else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
2266 		   mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
2267 		DEBUGOUT("MAC address is all zeros\n");
2268 		status = IXGBE_ERR_INVALID_MAC_ADDR;
2269 	}
2270 	return status;
2271 }
2272 
2273 /**
2274  *  ixgbe_set_rar_generic - Set Rx address register
2275  *  @hw: pointer to hardware structure
2276  *  @index: Receive address register to write
2277  *  @addr: Address to put into receive address register
2278  *  @vmdq: VMDq "set" or "pool" index
2279  *  @enable_addr: set flag that address is active
2280  *
2281  *  Puts an ethernet address into a receive address register.
2282  **/
2283 s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
2284 			  u32 enable_addr)
2285 {
2286 	u32 rar_low, rar_high;
2287 	u32 rar_entries = hw->mac.num_rar_entries;
2288 
2289 	DEBUGFUNC("ixgbe_set_rar_generic");
2290 
2291 	/* Make sure we are using a valid rar index range */
2292 	if (index >= rar_entries) {
2293 		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
2294 			     "RAR index %d is out of range.\n", index);
2295 		return IXGBE_ERR_INVALID_ARGUMENT;
2296 	}
2297 
2298 	/* setup VMDq pool selection before this RAR gets enabled */
2299 	hw->mac.ops.set_vmdq(hw, index, vmdq);
2300 
2301 	/*
2302 	 * HW expects these in little endian so we reverse the byte
2303 	 * order from network order (big endian) to little endian
2304 	 */
2305 	rar_low = ((u32)addr[0] |
2306 		   ((u32)addr[1] << 8) |
2307 		   ((u32)addr[2] << 16) |
2308 		   ((u32)addr[3] << 24));
2309 	/*
2310 	 * Some parts put the VMDq setting in the extra RAH bits,
2311 	 * so save everything except the lower 16 bits that hold part
2312 	 * of the address and the address valid bit.
2313 	 */
2314 	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
2315 	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
2316 	rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
2317 
2318 	if (enable_addr != 0)
2319 		rar_high |= IXGBE_RAH_AV;
2320 
2321 	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
2322 	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
2323 
2324 	return IXGBE_SUCCESS;
2325 }
2326 
2327 /**
2328  *  ixgbe_clear_rar_generic - Remove Rx address register
2329  *  @hw: pointer to hardware structure
2330  *  @index: Receive address register to write
2331  *
2332  *  Clears an ethernet address from a receive address register.
2333  **/
2334 s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
2335 {
2336 	u32 rar_high;
2337 	u32 rar_entries = hw->mac.num_rar_entries;
2338 
2339 	DEBUGFUNC("ixgbe_clear_rar_generic");
2340 
2341 	/* Make sure we are using a valid rar index range */
2342 	if (index >= rar_entries) {
2343 		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
2344 			     "RAR index %d is out of range.\n", index);
2345 		return IXGBE_ERR_INVALID_ARGUMENT;
2346 	}
2347 
2348 	/*
2349 	 * Some parts put the VMDq setting in the extra RAH bits,
2350 	 * so save everything except the lower 16 bits that hold part
2351 	 * of the address and the address valid bit.
2352 	 */
2353 	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
2354 	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
2355 
2356 	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
2357 	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
2358 
2359 	/* clear VMDq pool/queue selection for this RAR */
2360 	hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
2361 
2362 	return IXGBE_SUCCESS;
2363 }
2364 
2365 /**
2366  *  ixgbe_init_rx_addrs_generic - Initializes receive address filters.
2367  *  @hw: pointer to hardware structure
2368  *
2369  *  Places the MAC address in receive address register 0 and clears the rest
2370  *  of the receive address registers. Clears the multicast table. Assumes
2371  *  the receiver is in reset when the routine is called.
2372  **/
2373 s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
2374 {
2375 	u32 i;
2376 	u32 rar_entries = hw->mac.num_rar_entries;
2377 
2378 	DEBUGFUNC("ixgbe_init_rx_addrs_generic");
2379 
2380 	/*
2381 	 * If the current mac address is valid, assume it is a software override
2382 	 * to the permanent address.
2383 	 * Otherwise, use the permanent address from the eeprom.
2384 	 */
2385 	if (ixgbe_validate_mac_addr(hw->mac.addr) ==
2386 	    IXGBE_ERR_INVALID_MAC_ADDR) {
2387 		/* Get the MAC address from the RAR0 for later reference */
2388 		hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
2389 
2390 		DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
2391 			  hw->mac.addr[0], hw->mac.addr[1],
2392 			  hw->mac.addr[2]);
2393 		DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
2394 			  hw->mac.addr[4], hw->mac.addr[5]);
2395 	} else {
2396 		/* Setup the receive address. */
2397 		DEBUGOUT("Overriding MAC Address in RAR[0]\n");
2398 		DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ",
2399 			  hw->mac.addr[0], hw->mac.addr[1],
2400 			  hw->mac.addr[2]);
2401 		DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
2402 			  hw->mac.addr[4], hw->mac.addr[5]);
2403 
2404 		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
2405 
2406 		/* clear VMDq pool/queue selection for RAR 0 */
2407 		hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
2408 	}
2409 	hw->addr_ctrl.overflow_promisc = 0;
2410 
2411 	hw->addr_ctrl.rar_used_count = 1;
2412 
2413 	/* Zero out the other receive addresses. */
2414 	DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1);
2415 	for (i = 1; i < rar_entries; i++) {
2416 		IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
2417 		IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
2418 	}
2419 
2420 	/* Clear the MTA */
2421 	hw->addr_ctrl.mta_in_use = 0;
2422 	IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
2423 
2424 	DEBUGOUT(" Clearing MTA\n");
2425 	for (i = 0; i < hw->mac.mcft_size; i++)
2426 		IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
2427 
2428 	ixgbe_init_uta_tables(hw);
2429 
2430 	return IXGBE_SUCCESS;
2431 }
2432 
2433 /**
2434  *  ixgbe_add_uc_addr - Adds a secondary unicast address.
2435  *  @hw: pointer to hardware structure
2436  *  @addr: new address
2437  *
2438  *  Adds it to unused receive address register or goes into promiscuous mode.
2439  **/
2440 void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
2441 {
2442 	u32 rar_entries = hw->mac.num_rar_entries;
2443 	u32 rar;
2444 
2445 	DEBUGFUNC("ixgbe_add_uc_addr");
2446 
2447 	DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
2448 		  addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
2449 
2450 	/*
2451 	 * Place this address in the RAR if there is room,
2452 	 * else put the controller into promiscuous mode
2453 	 */
2454 	if (hw->addr_ctrl.rar_used_count < rar_entries) {
2455 		rar = hw->addr_ctrl.rar_used_count;
2456 		hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
2457 		DEBUGOUT1("Added a secondary address to RAR[%d]\n", rar);
2458 		hw->addr_ctrl.rar_used_count++;
2459 	} else {
2460 		hw->addr_ctrl.overflow_promisc++;
2461 	}
2462 
2463 	DEBUGOUT("ixgbe_add_uc_addr Complete\n");
2464 }
2465 
2466 /**
2467  *  ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
2468  *  @hw: pointer to hardware structure
2469  *  @addr_list: the list of new addresses
2470  *  @addr_count: number of addresses
2471  *  @next: iterator function to walk the address list
2472  *
2473  *  The given list replaces any existing list.  Clears the secondary addrs from
2474  *  receive address registers.  Uses unused receive address registers for the
2475  *  first secondary addresses, and falls back to promiscuous mode as needed.
2476  *
2477  *  Drivers using secondary unicast addresses must set user_set_promisc when
2478  *  manually putting the device into promiscuous mode.
2479  **/
2480 s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
2481 				      u32 addr_count, ixgbe_mc_addr_itr next)
2482 {
2483 	u8 *addr;
2484 	u32 i;
2485 	u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
2486 	u32 uc_addr_in_use;
2487 	u32 fctrl;
2488 	u32 vmdq;
2489 
2490 	DEBUGFUNC("ixgbe_update_uc_addr_list_generic");
2491 
2492 	/*
2493 	 * Clear accounting of old secondary address list,
2494 	 * don't count RAR[0]
2495 	 */
2496 	uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
2497 	hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
2498 	hw->addr_ctrl.overflow_promisc = 0;
2499 
2500 	/* Zero out the other receive addresses */
2501 	DEBUGOUT1("Clearing RAR[1-%d]\n", uc_addr_in_use+1);
2502 	for (i = 0; i < uc_addr_in_use; i++) {
2503 		IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0);
2504 		IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0);
2505 	}
2506 
2507 	/* Add the new addresses */
2508 	for (i = 0; i < addr_count; i++) {
2509 		DEBUGOUT(" Adding the secondary addresses:\n");
2510 		addr = next(hw, &addr_list, &vmdq);
2511 		ixgbe_add_uc_addr(hw, addr, vmdq);
2512 	}
2513 
2514 	if (hw->addr_ctrl.overflow_promisc) {
2515 		/* enable promisc if not already in overflow or set by user */
2516 		if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
2517 			DEBUGOUT(" Entering address overflow promisc mode\n");
2518 			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2519 			fctrl |= IXGBE_FCTRL_UPE;
2520 			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2521 		}
2522 	} else {
2523 		/* only disable if set by overflow, not by user */
2524 		if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
2525 			DEBUGOUT(" Leaving address overflow promisc mode\n");
2526 			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2527 			fctrl &= ~IXGBE_FCTRL_UPE;
2528 			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2529 		}
2530 	}
2531 
2532 	DEBUGOUT("ixgbe_update_uc_addr_list_generic Complete\n");
2533 	return IXGBE_SUCCESS;
2534 }
2535 
2536 /**
2537  *  ixgbe_mta_vector - Determines bit-vector in multicast table to set
2538  *  @hw: pointer to hardware structure
2539  *  @mc_addr: the multicast address
2540  *
2541  *  Extracts the 12 bits, from a multicast address, to determine which
2542  *  bit-vector to set in the multicast table. The hardware uses 12 bits, from
2543  *  incoming rx multicast addresses, to determine the bit-vector to check in
2544  *  the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
2545  *  by the MO field of the MCSTCTRL. The MO field is set during initialization
2546  *  to mc_filter_type.
2547  **/
2548 static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
2549 {
2550 	u32 vector = 0;
2551 
2552 	DEBUGFUNC("ixgbe_mta_vector");
2553 
2554 	switch (hw->mac.mc_filter_type) {
2555 	case 0:   /* use bits [47:36] of the address */
2556 		vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
2557 		break;
2558 	case 1:   /* use bits [46:35] of the address */
2559 		vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
2560 		break;
2561 	case 2:   /* use bits [45:34] of the address */
2562 		vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
2563 		break;
2564 	case 3:   /* use bits [43:32] of the address */
2565 		vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
2566 		break;
2567 	default:  /* Invalid mc_filter_type */
2568 		DEBUGOUT("MC filter type param set incorrectly\n");
2569 		ASSERT(0);
2570 		break;
2571 	}
2572 
2573 	/* vector can only be 12-bits or boundary will be exceeded */
2574 	vector &= 0xFFF;
2575 	return vector;
2576 }
2577 
2578 /**
2579  *  ixgbe_set_mta - Set bit-vector in multicast table
2580  *  @hw: pointer to hardware structure
2581  *  @hash_value: Multicast address hash value
2582  *
2583  *  Sets the bit-vector in the multicast table.
2584  **/
2585 void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
2586 {
2587 	u32 vector;
2588 	u32 vector_bit;
2589 	u32 vector_reg;
2590 
2591 	DEBUGFUNC("ixgbe_set_mta");
2592 
2593 	hw->addr_ctrl.mta_in_use++;
2594 
2595 	vector = ixgbe_mta_vector(hw, mc_addr);
2596 	DEBUGOUT1(" bit-vector = 0x%03X\n", vector);
2597 
2598 	/*
2599 	 * The MTA is a register array of 128 32-bit registers. It is treated
2600 	 * like an array of 4096 bits.  We want to set bit
2601 	 * BitArray[vector_value]. So we figure out what register the bit is
2602 	 * in, read it, OR in the new bit, then write back the new value.  The
2603 	 * register is determined by the upper 7 bits of the vector value and
2604 	 * the bit within that register are determined by the lower 5 bits of
2605 	 * the value.
2606 	 */
2607 	vector_reg = (vector >> 5) & 0x7F;
2608 	vector_bit = vector & 0x1F;
2609 	hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
2610 }
2611 
2612 /**
2613  *  ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
2614  *  @hw: pointer to hardware structure
2615  *  @mc_addr_list: the list of new multicast addresses
2616  *  @mc_addr_count: number of addresses
2617  *  @next: iterator function to walk the multicast address list
2618  *  @clear: flag, when set clears the table beforehand
2619  *
2620  *  When the clear flag is set, the given list replaces any existing list.
2621  *  Hashes the given addresses into the multicast table.
2622  **/
2623 s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
2624 				      u32 mc_addr_count, ixgbe_mc_addr_itr next,
2625 				      bool clear)
2626 {
2627 	u32 i;
2628 	u32 vmdq;
2629 
2630 	DEBUGFUNC("ixgbe_update_mc_addr_list_generic");
2631 
2632 	/*
2633 	 * Set the new number of MC addresses that we are being requested to
2634 	 * use.
2635 	 */
2636 	hw->addr_ctrl.num_mc_addrs = mc_addr_count;
2637 	hw->addr_ctrl.mta_in_use = 0;
2638 
2639 	/* Clear mta_shadow */
2640 	if (clear) {
2641 		DEBUGOUT(" Clearing MTA\n");
2642 		memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
2643 	}
2644 
2645 	/* Update mta_shadow */
2646 	for (i = 0; i < mc_addr_count; i++) {
2647 		DEBUGOUT(" Adding the multicast addresses:\n");
2648 		ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
2649 	}
2650 
2651 	/* Enable mta */
2652 	for (i = 0; i < hw->mac.mcft_size; i++)
2653 		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
2654 				      hw->mac.mta_shadow[i]);
2655 
2656 	if (hw->addr_ctrl.mta_in_use > 0)
2657 		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
2658 				IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
2659 
2660 	DEBUGOUT("ixgbe_update_mc_addr_list_generic Complete\n");
2661 	return IXGBE_SUCCESS;
2662 }
2663 
2664 /**
2665  *  ixgbe_enable_mc_generic - Enable multicast address in RAR
2666  *  @hw: pointer to hardware structure
2667  *
2668  *  Enables multicast address in RAR and the use of the multicast hash table.
2669  **/
2670 s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
2671 {
2672 	struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2673 
2674 	DEBUGFUNC("ixgbe_enable_mc_generic");
2675 
2676 	if (a->mta_in_use > 0)
2677 		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
2678 				hw->mac.mc_filter_type);
2679 
2680 	return IXGBE_SUCCESS;
2681 }
2682 
2683 /**
2684  *  ixgbe_disable_mc_generic - Disable multicast address in RAR
2685  *  @hw: pointer to hardware structure
2686  *
2687  *  Disables multicast address in RAR and the use of the multicast hash table.
2688  **/
2689 s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
2690 {
2691 	struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2692 
2693 	DEBUGFUNC("ixgbe_disable_mc_generic");
2694 
2695 	if (a->mta_in_use > 0)
2696 		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
2697 
2698 	return IXGBE_SUCCESS;
2699 }
2700 
2701 /**
2702  *  ixgbe_fc_enable_generic - Enable flow control
2703  *  @hw: pointer to hardware structure
2704  *
2705  *  Enable flow control according to the current settings.
2706  **/
2707 s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
2708 {
2709 	s32 ret_val = IXGBE_SUCCESS;
2710 	u32 mflcn_reg, fccfg_reg;
2711 	u32 reg;
2712 	u32 fcrtl, fcrth;
2713 	int i;
2714 
2715 	DEBUGFUNC("ixgbe_fc_enable_generic");
2716 
2717 	/* Validate the water mark configuration */
2718 	if (!hw->fc.pause_time) {
2719 		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2720 		goto out;
2721 	}
2722 
2723 	/* Low water mark of zero causes XOFF floods */
2724 	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
2725 		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
2726 		    hw->fc.high_water[i]) {
2727 			if (!hw->fc.low_water[i] ||
2728 			    hw->fc.low_water[i] >= hw->fc.high_water[i]) {
2729 				DEBUGOUT("Invalid water mark configuration\n");
2730 				ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2731 				goto out;
2732 			}
2733 		}
2734 	}
2735 
2736 	/* Negotiate the fc mode to use */
2737 	ixgbe_fc_autoneg(hw);
2738 
2739 	/* Disable any previous flow control settings */
2740 	mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
2741 	mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);
2742 
2743 	fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
2744 	fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
2745 
2746 	/*
2747 	 * The possible values of fc.current_mode are:
2748 	 * 0: Flow control is completely disabled
2749 	 * 1: Rx flow control is enabled (we can receive pause frames,
2750 	 *    but not send pause frames).
2751 	 * 2: Tx flow control is enabled (we can send pause frames but
2752 	 *    we do not support receiving pause frames).
2753 	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
2754 	 * other: Invalid.
2755 	 */
2756 	switch (hw->fc.current_mode) {
2757 	case ixgbe_fc_none:
2758 		/*
2759 		 * Flow control is disabled by software override or autoneg.
2760 		 * The code below will actually disable it in the HW.
2761 		 */
2762 		break;
2763 	case ixgbe_fc_rx_pause:
2764 		/*
2765 		 * Rx Flow control is enabled and Tx Flow control is
2766 		 * disabled by software override. Since there really
2767 		 * isn't a way to advertise that we are capable of RX
2768 		 * Pause ONLY, we will advertise that we support both
2769 		 * symmetric and asymmetric Rx PAUSE.  Later, we will
2770 		 * disable the adapter's ability to send PAUSE frames.
2771 		 */
2772 		mflcn_reg |= IXGBE_MFLCN_RFCE;
2773 		break;
2774 	case ixgbe_fc_tx_pause:
2775 		/*
2776 		 * Tx Flow control is enabled, and Rx Flow control is
2777 		 * disabled by software override.
2778 		 */
2779 		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
2780 		break;
2781 	case ixgbe_fc_full:
2782 		/* Flow control (both Rx and Tx) is enabled by SW override. */
2783 		mflcn_reg |= IXGBE_MFLCN_RFCE;
2784 		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
2785 		break;
2786 	default:
2787 		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
2788 			     "Flow control param set incorrectly\n");
2789 		ret_val = IXGBE_ERR_CONFIG;
2790 		goto out;
2791 		break;
2792 	}
2793 
2794 	/* Set 802.3x based flow control settings. */
2795 	mflcn_reg |= IXGBE_MFLCN_DPF;
2796 	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
2797 	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
2798 
2799 
2800 	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
2801 	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
2802 		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
2803 		    hw->fc.high_water[i]) {
2804 			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
2805 			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
2806 			fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
2807 		} else {
2808 			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
2809 			/*
2810 			 * In order to prevent Tx hangs when the internal Tx
2811 			 * switch is enabled we must set the high water mark
2812 			 * to the Rx packet buffer size - 24KB.  This allows
2813 			 * the Tx switch to function even under heavy Rx
2814 			 * workloads.
2815 			 */
2816 			fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576;
2817 		}
2818 
2819 		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
2820 	}
2821 
2822 	/* Configure pause time (2 TCs per register) */
2823 	reg = hw->fc.pause_time * 0x00010001;
2824 	for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
2825 		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
2826 
2827 	/* Configure flow control refresh threshold value */
2828 	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
2829 
2830 out:
2831 	return ret_val;
2832 }
2833 
2834 /**
2835  *  ixgbe_negotiate_fc - Negotiate flow control
2836  *  @hw: pointer to hardware structure
2837  *  @adv_reg: flow control advertised settings
2838  *  @lp_reg: link partner's flow control settings
2839  *  @adv_sym: symmetric pause bit in advertisement
2840  *  @adv_asm: asymmetric pause bit in advertisement
2841  *  @lp_sym: symmetric pause bit in link partner advertisement
2842  *  @lp_asm: asymmetric pause bit in link partner advertisement
2843  *
2844  *  Find the intersection between advertised settings and link partner's
2845  *  advertised settings
2846  **/
2847 static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
2848 			      u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
2849 {
2850 	if ((!(adv_reg)) ||  (!(lp_reg))) {
2851 		ERROR_REPORT3(IXGBE_ERROR_UNSUPPORTED,
2852 			     "Local or link partner's advertised flow control "
2853 			     "settings are NULL. Local: %x, link partner: %x\n",
2854 			     adv_reg, lp_reg);
2855 		return IXGBE_ERR_FC_NOT_NEGOTIATED;
2856 	}
2857 
2858 	if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
2859 		/*
2860 		 * Now we need to check if the user selected Rx ONLY
2861 		 * of pause frames.  In this case, we had to advertise
2862 		 * FULL flow control because we could not advertise RX
2863 		 * ONLY. Hence, we must now check to see if we need to
2864 		 * turn OFF the TRANSMISSION of PAUSE frames.
2865 		 */
2866 		if (hw->fc.requested_mode == ixgbe_fc_full) {
2867 			hw->fc.current_mode = ixgbe_fc_full;
2868 			DEBUGOUT("Flow Control = FULL.\n");
2869 		} else {
2870 			hw->fc.current_mode = ixgbe_fc_rx_pause;
2871 			DEBUGOUT("Flow Control=RX PAUSE frames only\n");
2872 		}
2873 	} else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2874 		   (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2875 		hw->fc.current_mode = ixgbe_fc_tx_pause;
2876 		DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
2877 	} else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2878 		   !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2879 		hw->fc.current_mode = ixgbe_fc_rx_pause;
2880 		DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
2881 	} else {
2882 		hw->fc.current_mode = ixgbe_fc_none;
2883 		DEBUGOUT("Flow Control = NONE.\n");
2884 	}
2885 	return IXGBE_SUCCESS;
2886 }
2887 
2888 /**
2889  *  ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
2890  *  @hw: pointer to hardware structure
2891  *
2892  *  Enable flow control according on 1 gig fiber.
2893  **/
2894 static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
2895 {
2896 	u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
2897 	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2898 
2899 	/*
2900 	 * On multispeed fiber at 1g, bail out if
2901 	 * - link is up but AN did not complete, or if
2902 	 * - link is up and AN completed but timed out
2903 	 */
2904 
2905 	linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
2906 	if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
2907 	    (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
2908 		DEBUGOUT("Auto-Negotiation did not complete or timed out\n");
2909 		goto out;
2910 	}
2911 
2912 	pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
2913 	pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
2914 
2915 	ret_val =  ixgbe_negotiate_fc(hw, pcs_anadv_reg,
2916 				      pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
2917 				      IXGBE_PCS1GANA_ASM_PAUSE,
2918 				      IXGBE_PCS1GANA_SYM_PAUSE,
2919 				      IXGBE_PCS1GANA_ASM_PAUSE);
2920 
2921 out:
2922 	return ret_val;
2923 }
2924 
2925 /**
2926  *  ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
2927  *  @hw: pointer to hardware structure
2928  *
2929  *  Enable flow control according to IEEE clause 37.
2930  **/
2931 static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
2932 {
2933 	u32 links2, anlp1_reg, autoc_reg, links;
2934 	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2935 
2936 	/*
2937 	 * On backplane, bail out if
2938 	 * - backplane autoneg was not completed, or if
2939 	 * - we are 82599 and link partner is not AN enabled
2940 	 */
2941 	links = IXGBE_READ_REG(hw, IXGBE_LINKS);
2942 	if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
2943 		DEBUGOUT("Auto-Negotiation did not complete\n");
2944 		goto out;
2945 	}
2946 
2947 	if (hw->mac.type == ixgbe_mac_82599EB) {
2948 		links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
2949 		if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
2950 			DEBUGOUT("Link partner is not AN enabled\n");
2951 			goto out;
2952 		}
2953 	}
2954 	/*
2955 	 * Read the 10g AN autoc and LP ability registers and resolve
2956 	 * local flow control settings accordingly
2957 	 */
2958 	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2959 	anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
2960 
2961 	ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
2962 		anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
2963 		IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);
2964 
2965 out:
2966 	return ret_val;
2967 }
2968 
2969 /**
2970  *  ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
2971  *  @hw: pointer to hardware structure
2972  *
2973  *  Enable flow control according to IEEE clause 37.
2974  **/
2975 static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
2976 {
2977 	u16 technology_ability_reg = 0;
2978 	u16 lp_technology_ability_reg = 0;
2979 
2980 	hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
2981 			     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
2982 			     &technology_ability_reg);
2983 	hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP,
2984 			     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
2985 			     &lp_technology_ability_reg);
2986 
2987 	return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
2988 				  (u32)lp_technology_ability_reg,
2989 				  IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
2990 				  IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
2991 }
2992 
2993 /**
2994  *  ixgbe_fc_autoneg - Configure flow control
2995  *  @hw: pointer to hardware structure
2996  *
2997  *  Compares our advertised flow control capabilities to those advertised by
2998  *  our link partner, and determines the proper flow control mode to use.
2999  **/
3000 void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
3001 {
3002 	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
3003 	ixgbe_link_speed speed;
3004 	bool link_up;
3005 
3006 	DEBUGFUNC("ixgbe_fc_autoneg");
3007 
3008 	/*
3009 	 * AN should have completed when the cable was plugged in.
3010 	 * Look for reasons to bail out.  Bail out if:
3011 	 * - FC autoneg is disabled, or if
3012 	 * - link is not up.
3013 	 */
3014 	if (hw->fc.disable_fc_autoneg) {
3015 		ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
3016 			     "Flow control autoneg is disabled");
3017 		goto out;
3018 	}
3019 
3020 	hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
3021 	if (!link_up) {
3022 		ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
3023 		goto out;
3024 	}
3025 
3026 	switch (hw->phy.media_type) {
3027 	/* Autoneg flow control on fiber adapters */
3028 	case ixgbe_media_type_fiber_fixed:
3029 	case ixgbe_media_type_fiber_qsfp:
3030 	case ixgbe_media_type_fiber:
3031 		if (speed == IXGBE_LINK_SPEED_1GB_FULL)
3032 			ret_val = ixgbe_fc_autoneg_fiber(hw);
3033 		break;
3034 
3035 	/* Autoneg flow control on backplane adapters */
3036 	case ixgbe_media_type_backplane:
3037 		ret_val = ixgbe_fc_autoneg_backplane(hw);
3038 		break;
3039 
3040 	/* Autoneg flow control on copper adapters */
3041 	case ixgbe_media_type_copper:
3042 		if (ixgbe_device_supports_autoneg_fc(hw))
3043 			ret_val = ixgbe_fc_autoneg_copper(hw);
3044 		break;
3045 
3046 	default:
3047 		break;
3048 	}
3049 
3050 out:
3051 	if (ret_val == IXGBE_SUCCESS) {
3052 		hw->fc.fc_was_autonegged = TRUE;
3053 	} else {
3054 		hw->fc.fc_was_autonegged = FALSE;
3055 		hw->fc.current_mode = hw->fc.requested_mode;
3056 	}
3057 }
3058 
3059 /*
3060  * ixgbe_pcie_timeout_poll - Return number of times to poll for completion
3061  * @hw: pointer to hardware structure
3062  *
3063  * System-wide timeout range is encoded in PCIe Device Control2 register.
3064  *
3065  * Add 10% to specified maximum and return the number of times to poll for
3066  * completion timeout, in units of 100 microsec.  Never return less than
3067  * 800 = 80 millisec.
3068  */
3069 static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw)
3070 {
3071 	s16 devctl2;
3072 	u32 pollcnt;
3073 
3074 	devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
3075 	devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK;
3076 
3077 	switch (devctl2) {
3078 	case IXGBE_PCIDEVCTRL2_65_130ms:
3079 		pollcnt = 1300;		/* 130 millisec */
3080 		break;
3081 	case IXGBE_PCIDEVCTRL2_260_520ms:
3082 		pollcnt = 5200;		/* 520 millisec */
3083 		break;
3084 	case IXGBE_PCIDEVCTRL2_1_2s:
3085 		pollcnt = 20000;	/* 2 sec */
3086 		break;
3087 	case IXGBE_PCIDEVCTRL2_4_8s:
3088 		pollcnt = 80000;	/* 8 sec */
3089 		break;
3090 	case IXGBE_PCIDEVCTRL2_17_34s:
3091 		pollcnt = 34000;	/* 34 sec */
3092 		break;
3093 	case IXGBE_PCIDEVCTRL2_50_100us:	/* 100 microsecs */
3094 	case IXGBE_PCIDEVCTRL2_1_2ms:		/* 2 millisecs */
3095 	case IXGBE_PCIDEVCTRL2_16_32ms:		/* 32 millisec */
3096 	case IXGBE_PCIDEVCTRL2_16_32ms_def:	/* 32 millisec default */
3097 	default:
3098 		pollcnt = 800;		/* 80 millisec minimum */
3099 		break;
3100 	}
3101 
3102 	/* add 10% to spec maximum */
3103 	return (pollcnt * 11) / 10;
3104 }
3105 
3106 /**
3107  *  ixgbe_disable_pcie_master - Disable PCI-express master access
3108  *  @hw: pointer to hardware structure
3109  *
3110  *  Disables PCI-Express master access and verifies there are no pending
3111  *  requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable
3112  *  bit hasn't caused the master requests to be disabled, else IXGBE_SUCCESS
3113  *  is returned signifying master requests disabled.
3114  **/
3115 s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
3116 {
3117 	s32 status = IXGBE_SUCCESS;
3118 	u32 i, poll;
3119 	u16 value;
3120 
3121 	DEBUGFUNC("ixgbe_disable_pcie_master");
3122 
3123 	/* Always set this bit to ensure any future transactions are blocked */
3124 	IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);
3125 
3126 	/* Exit if master requests are blocked */
3127 	if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) ||
3128 	    IXGBE_REMOVED(hw->hw_addr))
3129 		goto out;
3130 
3131 	/* Poll for master request bit to clear */
3132 	for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
3133 		usec_delay(100);
3134 		if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
3135 			goto out;
3136 	}
3137 
3138 	/*
3139 	 * Two consecutive resets are required via CTRL.RST per datasheet
3140 	 * 5.2.5.3.2 Master Disable.  We set a flag to inform the reset routine
3141 	 * of this need.  The first reset prevents new master requests from
3142 	 * being issued by our device.  We then must wait 1usec or more for any
3143 	 * remaining completions from the PCIe bus to trickle in, and then reset
3144 	 * again to clear out any effects they may have had on our device.
3145 	 */
3146 	DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n");
3147 	hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
3148 
3149 	/*
3150 	 * Before proceeding, make sure that the PCIe block does not have
3151 	 * transactions pending.
3152 	 */
3153 	poll = ixgbe_pcie_timeout_poll(hw);
3154 	for (i = 0; i < poll; i++) {
3155 		usec_delay(100);
3156 		value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS);
3157 		if (IXGBE_REMOVED(hw->hw_addr))
3158 			goto out;
3159 		if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
3160 			goto out;
3161 	}
3162 
3163 	ERROR_REPORT1(IXGBE_ERROR_POLLING,
3164 		     "PCIe transaction pending bit also did not clear.\n");
3165 	status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
3166 
3167 out:
3168 	return status;
3169 }
3170 
3171 /**
3172  *  ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
3173  *  @hw: pointer to hardware structure
3174  *  @mask: Mask to specify which semaphore to acquire
3175  *
3176  *  Acquires the SWFW semaphore through the GSSR register for the specified
3177  *  function (CSR, PHY0, PHY1, EEPROM, Flash)
3178  **/
3179 s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask)
3180 {
3181 	u32 gssr = 0;
3182 	u32 swmask = mask;
3183 	u32 fwmask = mask << 5;
3184 	u32 timeout = 200;
3185 	u32 i;
3186 
3187 	DEBUGFUNC("ixgbe_acquire_swfw_sync");
3188 
3189 	for (i = 0; i < timeout; i++) {
3190 		/*
3191 		 * SW NVM semaphore bit is used for access to all
3192 		 * SW_FW_SYNC bits (not just NVM)
3193 		 */
3194 		if (ixgbe_get_eeprom_semaphore(hw))
3195 			return IXGBE_ERR_SWFW_SYNC;
3196 
3197 		gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
3198 		if (!(gssr & (fwmask | swmask))) {
3199 			gssr |= swmask;
3200 			IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
3201 			ixgbe_release_eeprom_semaphore(hw);
3202 			return IXGBE_SUCCESS;
3203 		} else {
3204 			/* Resource is currently in use by FW or SW */
3205 			ixgbe_release_eeprom_semaphore(hw);
3206 			msec_delay(5);
3207 		}
3208 	}
3209 
3210 	/* If time expired clear the bits holding the lock and retry */
3211 	if (gssr & (fwmask | swmask))
3212 		ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask));
3213 
3214 	msec_delay(5);
3215 	return IXGBE_ERR_SWFW_SYNC;
3216 }
3217 
3218 /**
3219  *  ixgbe_release_swfw_sync - Release SWFW semaphore
3220  *  @hw: pointer to hardware structure
3221  *  @mask: Mask to specify which semaphore to release
3222  *
3223  *  Releases the SWFW semaphore through the GSSR register for the specified
3224  *  function (CSR, PHY0, PHY1, EEPROM, Flash)
3225  **/
3226 void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask)
3227 {
3228 	u32 gssr;
3229 	u32 swmask = mask;
3230 
3231 	DEBUGFUNC("ixgbe_release_swfw_sync");
3232 
3233 	ixgbe_get_eeprom_semaphore(hw);
3234 
3235 	gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
3236 	gssr &= ~swmask;
3237 	IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
3238 
3239 	ixgbe_release_eeprom_semaphore(hw);
3240 }
3241 
3242 /**
3243  *  ixgbe_disable_sec_rx_path_generic - Stops the receive data path
3244  *  @hw: pointer to hardware structure
3245  *
3246  *  Stops the receive data path and waits for the HW to internally empty
3247  *  the Rx security block
3248  **/
3249 s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw)
3250 {
3251 #define IXGBE_MAX_SECRX_POLL 40
3252 
3253 	int i;
3254 	int secrxreg;
3255 
3256 	DEBUGFUNC("ixgbe_disable_sec_rx_path_generic");
3257 
3258 
3259 	secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
3260 	secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
3261 	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
3262 	for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
3263 		secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
3264 		if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
3265 			break;
3266 		else
3267 			/* Use interrupt-safe sleep just in case */
3268 			usec_delay(1000);
3269 	}
3270 
3271 	/* For informational purposes only */
3272 	if (i >= IXGBE_MAX_SECRX_POLL)
3273 		DEBUGOUT("Rx unit being enabled before security "
3274 			 "path fully disabled.  Continuing with init.\n");
3275 
3276 	return IXGBE_SUCCESS;
3277 }
3278 
3279 /**
3280  *  prot_autoc_read_generic - Hides MAC differences needed for AUTOC read
3281  *  @hw: pointer to hardware structure
3282  *  @reg_val: Value we read from AUTOC
3283  *
3284  *  The default case requires no protection so just to the register read.
3285  */
3286 s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
3287 {
3288 	*locked = FALSE;
3289 	*reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
3290 	return IXGBE_SUCCESS;
3291 }
3292 
3293 /**
3294  * prot_autoc_write_generic - Hides MAC differences needed for AUTOC write
3295  * @hw: pointer to hardware structure
3296  * @reg_val: value to write to AUTOC
3297  * @locked: bool to indicate whether the SW/FW lock was already taken by
3298  *           previous read.
3299  *
3300  * The default case requires no protection so just to the register write.
3301  */
3302 s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked)
3303 {
3304 	UNREFERENCED_1PARAMETER(locked);
3305 
3306 	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val);
3307 	return IXGBE_SUCCESS;
3308 }
3309 
3310 /**
3311  *  ixgbe_enable_sec_rx_path_generic - Enables the receive data path
3312  *  @hw: pointer to hardware structure
3313  *
3314  *  Enables the receive data path.
3315  **/
3316 s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw)
3317 {
3318 	int secrxreg;
3319 
3320 	DEBUGFUNC("ixgbe_enable_sec_rx_path_generic");
3321 
3322 	secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
3323 	secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
3324 	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
3325 	IXGBE_WRITE_FLUSH(hw);
3326 
3327 	return IXGBE_SUCCESS;
3328 }
3329 
3330 /**
3331  *  ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit
3332  *  @hw: pointer to hardware structure
3333  *  @regval: register value to write to RXCTRL
3334  *
3335  *  Enables the Rx DMA unit
3336  **/
3337 s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
3338 {
3339 	DEBUGFUNC("ixgbe_enable_rx_dma_generic");
3340 
3341 	if (regval & IXGBE_RXCTRL_RXEN)
3342 		ixgbe_enable_rx(hw);
3343 	else
3344 		ixgbe_disable_rx(hw);
3345 
3346 	return IXGBE_SUCCESS;
3347 }
3348 
3349 /**
3350  *  ixgbe_blink_led_start_generic - Blink LED based on index.
3351  *  @hw: pointer to hardware structure
3352  *  @index: led number to blink
3353  **/
3354 s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
3355 {
3356 	ixgbe_link_speed speed = 0;
3357 	bool link_up = 0;
3358 	u32 autoc_reg = 0;
3359 	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
3360 	s32 ret_val = IXGBE_SUCCESS;
3361 	bool locked = FALSE;
3362 
3363 	DEBUGFUNC("ixgbe_blink_led_start_generic");
3364 
3365 	/*
3366 	 * Link must be up to auto-blink the LEDs;
3367 	 * Force it if link is down.
3368 	 */
3369 	hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
3370 
3371 	if (!link_up) {
3372 		ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
3373 		if (ret_val != IXGBE_SUCCESS)
3374 			goto out;
3375 
3376 		autoc_reg |= IXGBE_AUTOC_AN_RESTART;
3377 		autoc_reg |= IXGBE_AUTOC_FLU;
3378 
3379 		ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
3380 		if (ret_val != IXGBE_SUCCESS)
3381 			goto out;
3382 
3383 		IXGBE_WRITE_FLUSH(hw);
3384 		msec_delay(10);
3385 	}
3386 
3387 	led_reg &= ~IXGBE_LED_MODE_MASK(index);
3388 	led_reg |= IXGBE_LED_BLINK(index);
3389 	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
3390 	IXGBE_WRITE_FLUSH(hw);
3391 
3392 out:
3393 	return ret_val;
3394 }
3395 
3396 /**
3397  *  ixgbe_blink_led_stop_generic - Stop blinking LED based on index.
3398  *  @hw: pointer to hardware structure
3399  *  @index: led number to stop blinking
3400  **/
3401 s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
3402 {
3403 	u32 autoc_reg = 0;
3404 	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
3405 	s32 ret_val = IXGBE_SUCCESS;
3406 	bool locked = FALSE;
3407 
3408 	DEBUGFUNC("ixgbe_blink_led_stop_generic");
3409 
3410 	ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
3411 	if (ret_val != IXGBE_SUCCESS)
3412 		goto out;
3413 
3414 	autoc_reg &= ~IXGBE_AUTOC_FLU;
3415 	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
3416 
3417 	ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
3418 	if (ret_val != IXGBE_SUCCESS)
3419 		goto out;
3420 
3421 	led_reg &= ~IXGBE_LED_MODE_MASK(index);
3422 	led_reg &= ~IXGBE_LED_BLINK(index);
3423 	led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
3424 	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
3425 	IXGBE_WRITE_FLUSH(hw);
3426 
3427 out:
3428 	return ret_val;
3429 }
3430 
3431 /**
3432  *  ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM
3433  *  @hw: pointer to hardware structure
3434  *  @san_mac_offset: SAN MAC address offset
3435  *
3436  *  This function will read the EEPROM location for the SAN MAC address
3437  *  pointer, and returns the value at that location.  This is used in both
3438  *  get and set mac_addr routines.
3439  **/
3440 static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
3441 					 u16 *san_mac_offset)
3442 {
3443 	s32 ret_val;
3444 
3445 	DEBUGFUNC("ixgbe_get_san_mac_addr_offset");
3446 
3447 	/*
3448 	 * First read the EEPROM pointer to see if the MAC addresses are
3449 	 * available.
3450 	 */
3451 	ret_val = hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR,
3452 				      san_mac_offset);
3453 	if (ret_val) {
3454 		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
3455 			      "eeprom at offset %d failed",
3456 			      IXGBE_SAN_MAC_ADDR_PTR);
3457 	}
3458 
3459 	return ret_val;
3460 }
3461 
3462 /**
3463  *  ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM
3464  *  @hw: pointer to hardware structure
3465  *  @san_mac_addr: SAN MAC address
3466  *
3467  *  Reads the SAN MAC address from the EEPROM, if it's available.  This is
3468  *  per-port, so set_lan_id() must be called before reading the addresses.
3469  *  set_lan_id() is called by identify_sfp(), but this cannot be relied
3470  *  upon for non-SFP connections, so we must call it here.
3471  **/
3472 s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
3473 {
3474 	u16 san_mac_data, san_mac_offset;
3475 	u8 i;
3476 	s32 ret_val;
3477 
3478 	DEBUGFUNC("ixgbe_get_san_mac_addr_generic");
3479 
3480 	/*
3481 	 * First read the EEPROM pointer to see if the MAC addresses are
3482 	 * available.  If they're not, no point in calling set_lan_id() here.
3483 	 */
3484 	ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
3485 	if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
3486 		goto san_mac_addr_out;
3487 
3488 	/* make sure we know which port we need to program */
3489 	hw->mac.ops.set_lan_id(hw);
3490 	/* apply the port offset to the address offset */
3491 	(hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
3492 			 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
3493 	for (i = 0; i < 3; i++) {
3494 		ret_val = hw->eeprom.ops.read(hw, san_mac_offset,
3495 					      &san_mac_data);
3496 		if (ret_val) {
3497 			ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
3498 				      "eeprom read at offset %d failed",
3499 				      san_mac_offset);
3500 			goto san_mac_addr_out;
3501 		}
3502 		san_mac_addr[i * 2] = (u8)(san_mac_data);
3503 		san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
3504 		san_mac_offset++;
3505 	}
3506 	return IXGBE_SUCCESS;
3507 
3508 san_mac_addr_out:
3509 	/*
3510 	 * No addresses available in this EEPROM.  It's not an
3511 	 * error though, so just wipe the local address and return.
3512 	 */
3513 	for (i = 0; i < 6; i++)
3514 		san_mac_addr[i] = 0xFF;
3515 	return IXGBE_SUCCESS;
3516 }
3517 
3518 /**
3519  *  ixgbe_set_san_mac_addr_generic - Write the SAN MAC address to the EEPROM
3520  *  @hw: pointer to hardware structure
3521  *  @san_mac_addr: SAN MAC address
3522  *
3523  *  Write a SAN MAC address to the EEPROM.
3524  **/
3525 s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
3526 {
3527 	s32 ret_val;
3528 	u16 san_mac_data, san_mac_offset;
3529 	u8 i;
3530 
3531 	DEBUGFUNC("ixgbe_set_san_mac_addr_generic");
3532 
3533 	/* Look for SAN mac address pointer.  If not defined, return */
3534 	ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
3535 	if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
3536 		return IXGBE_ERR_NO_SAN_ADDR_PTR;
3537 
3538 	/* Make sure we know which port we need to write */
3539 	hw->mac.ops.set_lan_id(hw);
3540 	/* Apply the port offset to the address offset */
3541 	(hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
3542 			 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
3543 
3544 	for (i = 0; i < 3; i++) {
3545 		san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8);
3546 		san_mac_data |= (u16)(san_mac_addr[i * 2]);
3547 		hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data);
3548 		san_mac_offset++;
3549 	}
3550 
3551 	return IXGBE_SUCCESS;
3552 }
3553 
3554 /**
3555  *  ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count
3556  *  @hw: pointer to hardware structure
3557  *
3558  *  Read PCIe configuration space, and get the MSI-X vector count from
3559  *  the capabilities table.
3560  **/
3561 u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
3562 {
3563 	u16 msix_count = 1;
3564 	u16 max_msix_count;
3565 	u16 pcie_offset;
3566 
3567 	switch (hw->mac.type) {
3568 	case ixgbe_mac_82598EB:
3569 		pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS;
3570 		max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598;
3571 		break;
3572 	case ixgbe_mac_82599EB:
3573 	case ixgbe_mac_X540:
3574 	case ixgbe_mac_X550:
3575 	case ixgbe_mac_X550EM_x:
3576 	case ixgbe_mac_X550EM_a:
3577 		pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
3578 		max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
3579 		break;
3580 	default:
3581 		return msix_count;
3582 	}
3583 
3584 	DEBUGFUNC("ixgbe_get_pcie_msix_count_generic");
3585 	msix_count = IXGBE_READ_PCIE_WORD(hw, pcie_offset);
3586 	if (IXGBE_REMOVED(hw->hw_addr))
3587 		msix_count = 0;
3588 	msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
3589 
3590 	/* MSI-X count is zero-based in HW */
3591 	msix_count++;
3592 
3593 	if (msix_count > max_msix_count)
3594 		msix_count = max_msix_count;
3595 
3596 	return msix_count;
3597 }
3598 
3599 /**
3600  *  ixgbe_insert_mac_addr_generic - Find a RAR for this mac address
3601  *  @hw: pointer to hardware structure
3602  *  @addr: Address to put into receive address register
3603  *  @vmdq: VMDq pool to assign
3604  *
3605  *  Puts an ethernet address into a receive address register, or
3606  *  finds the rar that it is aleady in; adds to the pool list
3607  **/
3608 s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
3609 {
3610 	static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF;
3611 	u32 first_empty_rar = NO_EMPTY_RAR_FOUND;
3612 	u32 rar;
3613 	u32 rar_low, rar_high;
3614 	u32 addr_low, addr_high;
3615 
3616 	DEBUGFUNC("ixgbe_insert_mac_addr_generic");
3617 
3618 	/* swap bytes for HW little endian */
3619 	addr_low  = addr[0] | (addr[1] << 8)
3620 			    | (addr[2] << 16)
3621 			    | (addr[3] << 24);
3622 	addr_high = addr[4] | (addr[5] << 8);
3623 
3624 	/*
3625 	 * Either find the mac_id in rar or find the first empty space.
3626 	 * rar_highwater points to just after the highest currently used
3627 	 * rar in order to shorten the search.  It grows when we add a new
3628 	 * rar to the top.
3629 	 */
3630 	for (rar = 0; rar < hw->mac.rar_highwater; rar++) {
3631 		rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
3632 
3633 		if (((IXGBE_RAH_AV & rar_high) == 0)
3634 		    && first_empty_rar == NO_EMPTY_RAR_FOUND) {
3635 			first_empty_rar = rar;
3636 		} else if ((rar_high & 0xFFFF) == addr_high) {
3637 			rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar));
3638 			if (rar_low == addr_low)
3639 				break;    /* found it already in the rars */
3640 		}
3641 	}
3642 
3643 	if (rar < hw->mac.rar_highwater) {
3644 		/* already there so just add to the pool bits */
3645 		ixgbe_set_vmdq(hw, rar, vmdq);
3646 	} else if (first_empty_rar != NO_EMPTY_RAR_FOUND) {
3647 		/* stick it into first empty RAR slot we found */
3648 		rar = first_empty_rar;
3649 		ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
3650 	} else if (rar == hw->mac.rar_highwater) {
3651 		/* add it to the top of the list and inc the highwater mark */
3652 		ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
3653 		hw->mac.rar_highwater++;
3654 	} else if (rar >= hw->mac.num_rar_entries) {
3655 		return IXGBE_ERR_INVALID_MAC_ADDR;
3656 	}
3657 
3658 	/*
3659 	 * If we found rar[0], make sure the default pool bit (we use pool 0)
3660 	 * remains cleared to be sure default pool packets will get delivered
3661 	 */
3662 	if (rar == 0)
3663 		ixgbe_clear_vmdq(hw, rar, 0);
3664 
3665 	return rar;
3666 }
3667 
3668 /**
3669  *  ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
3670  *  @hw: pointer to hardware struct
3671  *  @rar: receive address register index to disassociate
3672  *  @vmdq: VMDq pool index to remove from the rar
3673  **/
3674 s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
3675 {
3676 	u32 mpsar_lo, mpsar_hi;
3677 	u32 rar_entries = hw->mac.num_rar_entries;
3678 
3679 	DEBUGFUNC("ixgbe_clear_vmdq_generic");
3680 
3681 	/* Make sure we are using a valid rar index range */
3682 	if (rar >= rar_entries) {
3683 		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
3684 			     "RAR index %d is out of range.\n", rar);
3685 		return IXGBE_ERR_INVALID_ARGUMENT;
3686 	}
3687 
3688 	mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3689 	mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3690 
3691 	if (IXGBE_REMOVED(hw->hw_addr))
3692 		goto done;
3693 
3694 	if (!mpsar_lo && !mpsar_hi)
3695 		goto done;
3696 
3697 	if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
3698 		if (mpsar_lo) {
3699 			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
3700 			mpsar_lo = 0;
3701 		}
3702 		if (mpsar_hi) {
3703 			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
3704 			mpsar_hi = 0;
3705 		}
3706 	} else if (vmdq < 32) {
3707 		mpsar_lo &= ~(1 << vmdq);
3708 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
3709 	} else {
3710 		mpsar_hi &= ~(1 << (vmdq - 32));
3711 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
3712 	}
3713 
3714 	/* was that the last pool using this rar? */
3715 	if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
3716 		hw->mac.ops.clear_rar(hw, rar);
3717 done:
3718 	return IXGBE_SUCCESS;
3719 }
3720 
3721 /**
3722  *  ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address
3723  *  @hw: pointer to hardware struct
3724  *  @rar: receive address register index to associate with a VMDq index
3725  *  @vmdq: VMDq pool index
3726  **/
3727 s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
3728 {
3729 	u32 mpsar;
3730 	u32 rar_entries = hw->mac.num_rar_entries;
3731 
3732 	DEBUGFUNC("ixgbe_set_vmdq_generic");
3733 
3734 	/* Make sure we are using a valid rar index range */
3735 	if (rar >= rar_entries) {
3736 		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
3737 			     "RAR index %d is out of range.\n", rar);
3738 		return IXGBE_ERR_INVALID_ARGUMENT;
3739 	}
3740 
3741 	if (vmdq < 32) {
3742 		mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3743 		mpsar |= 1 << vmdq;
3744 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
3745 	} else {
3746 		mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3747 		mpsar |= 1 << (vmdq - 32);
3748 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
3749 	}
3750 	return IXGBE_SUCCESS;
3751 }
3752 
3753 /**
3754  *  This function should only be involved in the IOV mode.
3755  *  In IOV mode, Default pool is next pool after the number of
3756  *  VFs advertized and not 0.
3757  *  MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index]
3758  *
3759  *  ixgbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address
3760  *  @hw: pointer to hardware struct
3761  *  @vmdq: VMDq pool index
3762  **/
3763 s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
3764 {
3765 	u32 rar = hw->mac.san_mac_rar_index;
3766 
3767 	DEBUGFUNC("ixgbe_set_vmdq_san_mac");
3768 
3769 	if (vmdq < 32) {
3770 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq);
3771 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
3772 	} else {
3773 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
3774 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32));
3775 	}
3776 
3777 	return IXGBE_SUCCESS;
3778 }
3779 
3780 /**
3781  *  ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
3782  *  @hw: pointer to hardware structure
3783  **/
3784 s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
3785 {
3786 	int i;
3787 
3788 	DEBUGFUNC("ixgbe_init_uta_tables_generic");
3789 	DEBUGOUT(" Clearing UTA\n");
3790 
3791 	for (i = 0; i < 128; i++)
3792 		IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
3793 
3794 	return IXGBE_SUCCESS;
3795 }
3796 
3797 /**
3798  *  ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
3799  *  @hw: pointer to hardware structure
3800  *  @vlan: VLAN id to write to VLAN filter
3801  *
3802  *  return the VLVF index where this VLAN id should be placed
3803  *
3804  **/
3805 s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan)
3806 {
3807 	u32 bits = 0;
3808 	u32 first_empty_slot = 0;
3809 	s32 regindex;
3810 
3811 	/* short cut the special case */
3812 	if (vlan == 0)
3813 		return 0;
3814 
3815 	/*
3816 	  * Search for the vlan id in the VLVF entries. Save off the first empty
3817 	  * slot found along the way
3818 	  */
3819 	for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
3820 		bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
3821 		if (!bits && !(first_empty_slot))
3822 			first_empty_slot = regindex;
3823 		else if ((bits & 0x0FFF) == vlan)
3824 			break;
3825 	}
3826 
3827 	/*
3828 	  * If regindex is less than IXGBE_VLVF_ENTRIES, then we found the vlan
3829 	  * in the VLVF. Else use the first empty VLVF register for this
3830 	  * vlan id.
3831 	  */
3832 	if (regindex >= IXGBE_VLVF_ENTRIES) {
3833 		if (first_empty_slot)
3834 			regindex = first_empty_slot;
3835 		else {
3836 			ERROR_REPORT1(IXGBE_ERROR_SOFTWARE,
3837 				     "No space in VLVF.\n");
3838 			regindex = IXGBE_ERR_NO_SPACE;
3839 		}
3840 	}
3841 
3842 	return regindex;
3843 }
3844 
3845 /**
3846  *  ixgbe_set_vfta_generic - Set VLAN filter table
3847  *  @hw: pointer to hardware structure
3848  *  @vlan: VLAN id to write to VLAN filter
3849  *  @vind: VMDq output index that maps queue to VLAN id in VFVFB
3850  *  @vlan_on: boolean flag to turn on/off VLAN in VFVF
3851  *
3852  *  Turn on/off specified VLAN in the VLAN filter table.
3853  **/
3854 s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
3855 			   bool vlan_on)
3856 {
3857 	s32 regindex;
3858 	u32 bitindex;
3859 	u32 vfta;
3860 	u32 targetbit;
3861 	s32 ret_val = IXGBE_SUCCESS;
3862 	bool vfta_changed = FALSE;
3863 
3864 	DEBUGFUNC("ixgbe_set_vfta_generic");
3865 
3866 	if (vlan > 4095)
3867 		return IXGBE_ERR_PARAM;
3868 
3869 	/*
3870 	 * this is a 2 part operation - first the VFTA, then the
3871 	 * VLVF and VLVFB if VT Mode is set
3872 	 * We don't write the VFTA until we know the VLVF part succeeded.
3873 	 */
3874 
3875 	/* Part 1
3876 	 * The VFTA is a bitstring made up of 128 32-bit registers
3877 	 * that enable the particular VLAN id, much like the MTA:
3878 	 *    bits[11-5]: which register
3879 	 *    bits[4-0]:  which bit in the register
3880 	 */
3881 	regindex = (vlan >> 5) & 0x7F;
3882 	bitindex = vlan & 0x1F;
3883 	targetbit = (1 << bitindex);
3884 	vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
3885 
3886 	if (vlan_on) {
3887 		if (!(vfta & targetbit)) {
3888 			vfta |= targetbit;
3889 			vfta_changed = TRUE;
3890 		}
3891 	} else {
3892 		if ((vfta & targetbit)) {
3893 			vfta &= ~targetbit;
3894 			vfta_changed = TRUE;
3895 		}
3896 	}
3897 
3898 	/* Part 2
3899 	 * Call ixgbe_set_vlvf_generic to set VLVFB and VLVF
3900 	 */
3901 	ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on,
3902 					 &vfta_changed);
3903 	if (ret_val != IXGBE_SUCCESS)
3904 		return ret_val;
3905 
3906 	if (vfta_changed)
3907 		IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta);
3908 
3909 	return IXGBE_SUCCESS;
3910 }
3911 
3912 /**
3913  *  ixgbe_set_vlvf_generic - Set VLAN Pool Filter
3914  *  @hw: pointer to hardware structure
3915  *  @vlan: VLAN id to write to VLAN filter
3916  *  @vind: VMDq output index that maps queue to VLAN id in VFVFB
3917  *  @vlan_on: boolean flag to turn on/off VLAN in VFVF
3918  *  @vfta_changed: pointer to boolean flag which indicates whether VFTA
3919  *                 should be changed
3920  *
3921  *  Turn on/off specified bit in VLVF table.
3922  **/
s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
			    bool vlan_on, bool *vfta_changed)
{
	u32 vt;

	DEBUGFUNC("ixgbe_set_vlvf_generic");

	/* VLAN IDs are only 12 bits wide */
	if (vlan > 4095)
		return IXGBE_ERR_PARAM;

	/* If VT Mode is set
	 *   Either vlan_on
	 *     make sure the vlan is in VLVF
	 *     set the vind bit in the matching VLVFB
	 *   Or !vlan_on
	 *     clear the pool bit and possibly the vind
	 */
	vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
	if (vt & IXGBE_VT_CTL_VT_ENABLE) {
		s32 vlvf_index;
		u32 bits;

		/* Locate (or allocate) the VLVF slot for this VLAN.
		 * A negative return is an error code from the lookup. */
		vlvf_index = ixgbe_find_vlvf_slot(hw, vlan);
		if (vlvf_index < 0)
			return vlvf_index;

		/* Each VLVF slot has two 32-bit VLVFB pool-enable halves;
		 * vind selects the half and the bit within it. */
		if (vlan_on) {
			/* set the pool bit */
			if (vind < 32) {
				bits = IXGBE_READ_REG(hw,
						IXGBE_VLVFB(vlvf_index * 2));
				bits |= (1 << vind);
				IXGBE_WRITE_REG(hw,
						IXGBE_VLVFB(vlvf_index * 2),
						bits);
			} else {
				bits = IXGBE_READ_REG(hw,
					IXGBE_VLVFB((vlvf_index * 2) + 1));
				bits |= (1 << (vind - 32));
				IXGBE_WRITE_REG(hw,
					IXGBE_VLVFB((vlvf_index * 2) + 1),
					bits);
			}
		} else {
			/* clear the pool bit */
			if (vind < 32) {
				bits = IXGBE_READ_REG(hw,
						IXGBE_VLVFB(vlvf_index * 2));
				bits &= ~(1 << vind);
				IXGBE_WRITE_REG(hw,
						IXGBE_VLVFB(vlvf_index * 2),
						bits);
				/* fold in the other half so "bits" is
				 * non-zero iff any pool still uses this
				 * VLAN after the clear */
				bits |= IXGBE_READ_REG(hw,
					IXGBE_VLVFB((vlvf_index * 2) + 1));
			} else {
				bits = IXGBE_READ_REG(hw,
					IXGBE_VLVFB((vlvf_index * 2) + 1));
				bits &= ~(1 << (vind - 32));
				IXGBE_WRITE_REG(hw,
					IXGBE_VLVFB((vlvf_index * 2) + 1),
					bits);
				/* same merge for the lower half */
				bits |= IXGBE_READ_REG(hw,
						IXGBE_VLVFB(vlvf_index * 2));
			}
		}

		/*
		 * If there are still bits set in the VLVFB registers
		 * for the VLAN ID indicated we need to see if the
		 * caller is requesting that we clear the VFTA entry bit.
		 * If the caller has requested that we clear the VFTA
		 * entry bit but there are still pools/VFs using this VLAN
		 * ID entry then ignore the request.  We're not worried
		 * about the case where we're turning the VFTA VLAN ID
		 * entry bit on, only when requested to turn it off as
		 * there may be multiple pools and/or VFs using the
		 * VLAN ID entry.  In that case we cannot clear the
		 * VFTA bit until all pools/VFs using that VLAN ID have also
		 * been cleared.  This will be indicated by "bits" being
		 * zero.
		 */
		if (bits) {
			IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
					(IXGBE_VLVF_VIEN | vlan));
			if ((!vlan_on) && (vfta_changed != NULL)) {
				/* someone wants to clear the vfta entry
				 * but some pools/VFs are still using it.
				 * Ignore it. */
				*vfta_changed = FALSE;
			}
		} else
			IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
	}

	return IXGBE_SUCCESS;
}
4019 
4020 /**
4021  *  ixgbe_clear_vfta_generic - Clear VLAN filter table
4022  *  @hw: pointer to hardware structure
4023  *
4024  *  Clears the VLAN filer table, and the VMDq index associated with the filter
4025  **/
4026 s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
4027 {
4028 	u32 offset;
4029 
4030 	DEBUGFUNC("ixgbe_clear_vfta_generic");
4031 
4032 	for (offset = 0; offset < hw->mac.vft_size; offset++)
4033 		IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
4034 
4035 	for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
4036 		IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
4037 		IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0);
4038 		IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset * 2) + 1), 0);
4039 	}
4040 
4041 	return IXGBE_SUCCESS;
4042 }
4043 
4044 /**
4045  *  ixgbe_check_mac_link_generic - Determine link and speed status
4046  *  @hw: pointer to hardware structure
4047  *  @speed: pointer to link speed
4048  *  @link_up: TRUE when link is up
4049  *  @link_up_wait_to_complete: bool used to wait for link up or not
4050  *
4051  *  Reads the links register to determine if link is up and the current speed
4052  **/
s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
				 bool *link_up, bool link_up_wait_to_complete)
{
	u32 links_reg, links_orig;
	u32 i;

	DEBUGFUNC("ixgbe_check_mac_link_generic");

	/* clear the old state */
	links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);

	/* Read LINKS a second time; the first read retires any latched
	 * state so this read reflects the current link status. */
	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);

	if (links_orig != links_reg) {
		DEBUGOUT2("LINKS changed from %08X to %08X\n",
			  links_orig, links_reg);
	}

	/* Either poll for link-up (100 ms per iteration, up to
	 * IXGBE_LINK_UP_TIME tries) or report the instantaneous state. */
	if (link_up_wait_to_complete) {
		for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
			if (links_reg & IXGBE_LINKS_UP) {
				*link_up = TRUE;
				break;
			} else {
				*link_up = FALSE;
			}
			msec_delay(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
		}
	} else {
		if (links_reg & IXGBE_LINKS_UP)
			*link_up = TRUE;
		else
			*link_up = FALSE;
	}

	/* Decode the speed field; on X550 and later the NON_STD bit
	 * remaps 10G->2.5G and 100M->5G. */
	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
	case IXGBE_LINKS_SPEED_10G_82599:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		if (hw->mac.type >= ixgbe_mac_X550) {
			if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
				*speed = IXGBE_LINK_SPEED_2_5GB_FULL;
		}
		break;
	case IXGBE_LINKS_SPEED_1G_82599:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_100_82599:
		*speed = IXGBE_LINK_SPEED_100_FULL;
		if (hw->mac.type >= ixgbe_mac_X550) {
			if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
				*speed = IXGBE_LINK_SPEED_5GB_FULL;
		}
		break;
	default:
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
	}

	return IXGBE_SUCCESS;
}
4113 
4114 /**
4115  *  ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
4116  *  the EEPROM
4117  *  @hw: pointer to hardware structure
4118  *  @wwnn_prefix: the alternative WWNN prefix
4119  *  @wwpn_prefix: the alternative WWPN prefix
4120  *
4121  *  This function will read the EEPROM from the alternative SAN MAC address
4122  *  block to check the support for the alternative WWNN/WWPN prefix support.
4123  **/
s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
				 u16 *wwpn_prefix)
{
	u16 offset, caps;
	u16 alt_san_mac_blk_offset;

	DEBUGFUNC("ixgbe_get_wwn_prefix_generic");

	/* clear output first */
	*wwnn_prefix = 0xFFFF;
	*wwpn_prefix = 0xFFFF;

	/* check if alternative SAN MAC is supported */
	offset = IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR;
	if (hw->eeprom.ops.read(hw, offset, &alt_san_mac_blk_offset))
		goto wwn_prefix_err;

	/* 0 and 0xFFFF mean "no alternative SAN MAC block present" */
	if ((alt_san_mac_blk_offset == 0) ||
	    (alt_san_mac_blk_offset == 0xFFFF))
		goto wwn_prefix_out;

	/* check capability in alternative san mac address block */
	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
	if (hw->eeprom.ops.read(hw, offset, &caps))
		goto wwn_prefix_err;
	if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
		goto wwn_prefix_out;

	/* get the corresponding prefix for WWNN/WWPN */
	/* NOTE(review): unlike the WWPN read below, a WWNN read failure
	 * only logs and falls through to still attempt the WWPN read —
	 * confirm this asymmetry is intentional. */
	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
	if (hw->eeprom.ops.read(hw, offset, wwnn_prefix)) {
		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
			      "eeprom read at offset %d failed", offset);
	}

	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
	if (hw->eeprom.ops.read(hw, offset, wwpn_prefix))
		goto wwn_prefix_err;

wwn_prefix_out:
	return IXGBE_SUCCESS;

wwn_prefix_err:
	/* EEPROM failures are reported but not returned to the caller;
	 * the 0xFFFF prefixes set above signal "unavailable". */
	ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
		      "eeprom read at offset %d failed", offset);
	return IXGBE_SUCCESS;
}
4171 
4172 /**
4173  *  ixgbe_get_fcoe_boot_status_generic - Get FCOE boot status from EEPROM
4174  *  @hw: pointer to hardware structure
4175  *  @bs: the fcoe boot status
4176  *
4177  *  This function will read the FCOE boot status from the iSCSI FCOE block
4178  **/
s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs)
{
	u16 offset, caps, flags;
	s32 status;

	DEBUGFUNC("ixgbe_get_fcoe_boot_status_generic");

	/* clear output first */
	*bs = ixgbe_fcoe_bootstatus_unavailable;

	/* check if FCOE IBA block is present */
	offset = IXGBE_FCOE_IBA_CAPS_BLK_PTR;
	status = hw->eeprom.ops.read(hw, offset, &caps);
	if (status != IXGBE_SUCCESS)
		goto out;

	/* *bs stays "unavailable" on any early exit below */
	if (!(caps & IXGBE_FCOE_IBA_CAPS_FCOE))
		goto out;

	/* check if iSCSI FCOE block is populated */
	status = hw->eeprom.ops.read(hw, IXGBE_ISCSI_FCOE_BLK_PTR, &offset);
	if (status != IXGBE_SUCCESS)
		goto out;

	/* 0 and 0xFFFF mean the block pointer is unprogrammed */
	if ((offset == 0) || (offset == 0xFFFF))
		goto out;

	/* read fcoe flags in iSCSI FCOE block */
	offset = offset + IXGBE_ISCSI_FCOE_FLAGS_OFFSET;
	status = hw->eeprom.ops.read(hw, offset, &flags);
	if (status != IXGBE_SUCCESS)
		goto out;

	if (flags & IXGBE_ISCSI_FCOE_FLAGS_ENABLE)
		*bs = ixgbe_fcoe_bootstatus_enabled;
	else
		*bs = ixgbe_fcoe_bootstatus_disabled;

out:
	return status;
}
4220 
4221 /**
4222  *  ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
4223  *  @hw: pointer to hardware structure
4224  *  @enable: enable or disable switch for anti-spoofing
4225  *  @pf: Physical Function pool - do not enable anti-spoofing for the PF
4226  *
4227  **/
void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf)
{
	int j;
	int pf_target_reg = pf >> 3;	/* PFVFSPOOF register holding the PF's bit */
	int pf_target_shift = pf % 8;	/* PF's bit position within that register */
	u32 pfvfspoof = 0;

	/* 82598 has no MAC anti-spoofing support */
	if (hw->mac.type == ixgbe_mac_82598EB)
		return;

	if (enable)
		pfvfspoof = IXGBE_SPOOF_MACAS_MASK;

	/*
	 * PFVFSPOOF register array is size 8 with 8 bits assigned to
	 * MAC anti-spoof enables in each register array element.
	 */
	for (j = 0; j < pf_target_reg; j++)
		IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);

	/*
	 * The PF should be allowed to spoof so that it can support
	 * emulation mode NICs.  Do not set the bits assigned to the PF
	 */
	pfvfspoof &= (1 << pf_target_shift) - 1;
	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);

	/*
	 * Remaining pools belong to the PF so they do not need to have
	 * anti-spoofing enabled.
	 */
	/* j carries over from the loops above: continue from the register
	 * just past the PF's and zero the rest of the array. */
	for (j++; j < IXGBE_PFVFSPOOF_REG_COUNT; j++)
		IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), 0);
}
4262 
4263 /**
4264  *  ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
4265  *  @hw: pointer to hardware structure
4266  *  @enable: enable or disable switch for VLAN anti-spoofing
4267  *  @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
4268  *
4269  **/
4270 void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
4271 {
4272 	int vf_target_reg = vf >> 3;
4273 	int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT;
4274 	u32 pfvfspoof;
4275 
4276 	if (hw->mac.type == ixgbe_mac_82598EB)
4277 		return;
4278 
4279 	pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
4280 	if (enable)
4281 		pfvfspoof |= (1 << vf_target_shift);
4282 	else
4283 		pfvfspoof &= ~(1 << vf_target_shift);
4284 	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
4285 }
4286 
4287 /**
4288  *  ixgbe_get_device_caps_generic - Get additional device capabilities
4289  *  @hw: pointer to hardware structure
4290  *  @device_caps: the EEPROM word with the extra device capabilities
4291  *
4292  *  This function will read the EEPROM location for the device capabilities,
4293  *  and return the word through device_caps.
4294  **/
4295 s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps)
4296 {
4297 	DEBUGFUNC("ixgbe_get_device_caps_generic");
4298 
4299 	hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
4300 
4301 	return IXGBE_SUCCESS;
4302 }
4303 
4304 /**
4305  *  ixgbe_enable_relaxed_ordering_gen2 - Enable relaxed ordering
4306  *  @hw: pointer to hardware structure
4307  *
4308  **/
4309 void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw)
4310 {
4311 	u32 regval;
4312 	u32 i;
4313 
4314 	DEBUGFUNC("ixgbe_enable_relaxed_ordering_gen2");
4315 
4316 	/* Enable relaxed ordering */
4317 	for (i = 0; i < hw->mac.max_tx_queues; i++) {
4318 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
4319 		regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
4320 		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
4321 	}
4322 
4323 	for (i = 0; i < hw->mac.max_rx_queues; i++) {
4324 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
4325 		regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
4326 			  IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
4327 		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
4328 	}
4329 
4330 }
4331 
4332 /**
4333  *  ixgbe_calculate_checksum - Calculate checksum for buffer
4334  *  @buffer: pointer to EEPROM
4335  *  @length: size of EEPROM to calculate a checksum for
4336  *  Calculates the checksum for some buffer on a specified length.  The
4337  *  checksum calculated is returned.
4338  **/
4339 u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
4340 {
4341 	u32 i;
4342 	u8 sum = 0;
4343 
4344 	DEBUGFUNC("ixgbe_calculate_checksum");
4345 
4346 	if (!buffer)
4347 		return 0;
4348 
4349 	for (i = 0; i < length; i++)
4350 		sum += buffer[i];
4351 
4352 	return (u8) (0 - sum);
4353 }
4354 
4355 /**
4356  *  ixgbe_host_interface_command - Issue command to manageability block
4357  *  @hw: pointer to the HW structure
4358  *  @buffer: contains the command to write and where the return status will
4359  *   be placed
4360  *  @length: length of buffer, must be multiple of 4 bytes
4361  *  @timeout: time in ms to wait for command completion
4362  *  @return_data: read and return data from the buffer (TRUE) or not (FALSE)
4363  *   Needed because FW structures are big endian and decoding of
4364  *   these fields can be 8 bit or 16 bit based on command. Decoding
4365  *   is not easily understood without making a table of commands.
4366  *   So we will leave this up to the caller to read back the data
4367  *   in these cases.
4368  *
4369  *  Communicates with the manageability block.  On success return IXGBE_SUCCESS
4370  *  else return IXGBE_ERR_HOST_INTERFACE_COMMAND.
4371  **/
4372 s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
4373 				 u32 length, u32 timeout, bool return_data)
4374 {
4375 	u32 hicr, i, bi, fwsts;
4376 	u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
4377 	u16 buf_len;
4378 	u16 dword_len;
4379 
4380 	DEBUGFUNC("ixgbe_host_interface_command");
4381 
4382 	if (length == 0 || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
4383 		DEBUGOUT1("Buffer length failure buffersize=%d.\n", length);
4384 		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
4385 	}
4386 	/* Set bit 9 of FWSTS clearing FW reset indication */
4387 	fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS);
4388 	IXGBE_WRITE_REG(hw, IXGBE_FWSTS, fwsts | IXGBE_FWSTS_FWRI);
4389 
4390 	/* Check that the host interface is enabled. */
4391 	hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
4392 	if ((hicr & IXGBE_HICR_EN) == 0) {
4393 		DEBUGOUT("IXGBE_HOST_EN bit disabled.\n");
4394 		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
4395 	}
4396 
4397 	/* Calculate length in DWORDs. We must be DWORD aligned */
4398 	if ((length % (sizeof(u32))) != 0) {
4399 		DEBUGOUT("Buffer length failure, not aligned to dword");
4400 		return IXGBE_ERR_INVALID_ARGUMENT;
4401 	}
4402 
4403 	dword_len = length >> 2;
4404 
4405 	/* The device driver writes the relevant command block
4406 	 * into the ram area.
4407 	 */
4408 	for (i = 0; i < dword_len; i++)
4409 		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
4410 				      i, IXGBE_CPU_TO_LE32(buffer[i]));
4411 
4412 	/* Setting this bit tells the ARC that a new command is pending. */
4413 	IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);
4414 
4415 	for (i = 0; i < timeout; i++) {
4416 		hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
4417 		if (!(hicr & IXGBE_HICR_C))
4418 			break;
4419 		msec_delay(1);
4420 	}
4421 
4422 	/* Check command completion */
4423 	if ((timeout != 0 && i == timeout) ||
4424 	    !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) {
4425 		ERROR_REPORT1(IXGBE_ERROR_CAUTION,
4426 			     "Command has failed with no status valid.\n");
4427 		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
4428 	}
4429 
4430 	if (!return_data)
4431 		return 0;
4432 
4433 	/* Calculate length in DWORDs */
4434 	dword_len = hdr_size >> 2;
4435 
4436 	/* first pull in the header so we know the buffer length */
4437 	for (bi = 0; bi < dword_len; bi++) {
4438 		buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
4439 		IXGBE_LE32_TO_CPUS(&buffer[bi]);
4440 	}
4441 
4442 	/* If there is any thing in data position pull it in */
4443 	buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len;
4444 	if (buf_len == 0)
4445 		return 0;
4446 
4447 	if (length < buf_len + hdr_size) {
4448 		DEBUGOUT("Buffer not large enough for reply message.\n");
4449 		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
4450 	}
4451 
4452 	/* Calculate length in DWORDs, add 3 for odd lengths */
4453 	dword_len = (buf_len + 3) >> 2;
4454 
4455 	/* Pull in the rest of the buffer (bi is where we left off) */
4456 	for (; bi <= dword_len; bi++) {
4457 		buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
4458 		IXGBE_LE32_TO_CPUS(&buffer[bi]);
4459 	}
4460 
4461 	return 0;
4462 }
4463 
4464 /**
4465  *  ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware
4466  *  @hw: pointer to the HW structure
4467  *  @maj: driver version major number
4468  *  @min: driver version minor number
4469  *  @build: driver version build number
4470  *  @sub: driver version sub build number
4471  *
4472  *  Sends driver version number to firmware through the manageability
4473  *  block.  On success return IXGBE_SUCCESS
4474  *  else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring
4475  *  semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
4476  **/
s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
				 u8 build, u8 sub)
{
	struct ixgbe_hic_drv_info fw_cmd;
	int i;
	s32 ret_val = IXGBE_SUCCESS;

	DEBUGFUNC("ixgbe_set_fw_drv_ver_generic");

	/* Serialize access to the manageability block with the FW. */
	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM)
	    != IXGBE_SUCCESS) {
		ret_val = IXGBE_ERR_SWFW_SYNC;
		goto out;
	}

	/* Build the driver-info command; the checksum field must be
	 * zeroed before the checksum over (header + payload) is taken. */
	fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
	fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
	fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
	fw_cmd.port_num = (u8)hw->bus.func;
	fw_cmd.ver_maj = maj;
	fw_cmd.ver_min = min;
	fw_cmd.ver_build = build;
	fw_cmd.ver_sub = sub;
	fw_cmd.hdr.checksum = 0;
	fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
				(FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
	/* NOTE(review): pads are assigned after the checksum — presumably
	 * they lie outside the checksummed span; confirm against the
	 * struct layout. */
	fw_cmd.pad = 0;
	fw_cmd.pad2 = 0;

	/* Retry the command a bounded number of times; break on the
	 * first attempt that completes (successfully or not). */
	for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
		ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
						       sizeof(fw_cmd),
						       IXGBE_HI_COMMAND_TIMEOUT,
						       TRUE);
		if (ret_val != IXGBE_SUCCESS)
			continue;

		if (fw_cmd.hdr.cmd_or_resp.ret_status ==
		    FW_CEM_RESP_STATUS_SUCCESS)
			ret_val = IXGBE_SUCCESS;
		else
			ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;

		break;
	}

	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
out:
	return ret_val;
}
4527 
4528 /**
4529  * ixgbe_set_rxpba_generic - Initialize Rx packet buffer
4530  * @hw: pointer to hardware structure
4531  * @num_pb: number of packet buffers to allocate
4532  * @headroom: reserve n KB of headroom
4533  * @strategy: packet buffer allocation strategy
4534  **/
void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
			     int strategy)
{
	u32 pbsize = hw->mac.rx_pb_size;
	int i = 0;
	u32 rxpktsize, txpktsize, txpbthresh;

	/* Reserve headroom */
	pbsize -= headroom;

	/* Guard against division by zero below. */
	if (!num_pb)
		num_pb = 1;

	/* Divide remaining packet buffer space amongst the number of packet
	 * buffers requested using supplied strategy.
	 */
	switch (strategy) {
	case PBA_STRATEGY_WEIGHTED:
		/* ixgbe_dcb_pba_80_48 strategy weight first half of packet
		 * buffer with 5/8 of the packet buffer space.
		 */
		rxpktsize = (pbsize * 5) / (num_pb * 4);
		pbsize -= rxpktsize * (num_pb / 2);
		rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
		for (; i < (num_pb / 2); i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
		/* Fall through to configure remaining packet buffers */
	case PBA_STRATEGY_EQUAL:
		/* i carries over from the weighted case, so only the
		 * not-yet-configured buffers share the remaining space. */
		rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
		for (; i < num_pb; i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
		break;
	default:
		break;
	}

	/* Only support an equally distributed Tx packet buffer strategy. */
	txpktsize = IXGBE_TXPBSIZE_MAX / num_pb;
	txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
	for (i = 0; i < num_pb; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
		IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
	}

	/* Clear unused TCs, if any, to zero buffer size*/
	for (; i < IXGBE_MAX_PB; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
	}
}
4586 
4587 /**
4588  * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo
4589  * @hw: pointer to the hardware structure
4590  *
4591  * The 82599 and x540 MACs can experience issues if TX work is still pending
4592  * when a reset occurs.  This function prevents this by flushing the PCIe
4593  * buffers on the system.
4594  **/
void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
{
	u32 gcr_ext, hlreg0, i, poll;
	u16 value;

	/*
	 * If double reset is not requested then all transactions should
	 * already be clear and as such there is no work to do
	 */
	if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED))
		return;

	/*
	 * Set loopback enable to prevent any transmits from being sent
	 * should the link come up.  This assumes that the RXCTRL.RXEN bit
	 * has already been cleared.
	 */
	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);

	/* Wait for a last completion before clearing buffers */
	IXGBE_WRITE_FLUSH(hw);
	msec_delay(3);

	/*
	 * Before proceeding, make sure that the PCIe block does not have
	 * transactions pending.
	 */
	/* Poll the PCIe device status; bail out early if the device has
	 * been surprise-removed or the pending bit clears. */
	poll = ixgbe_pcie_timeout_poll(hw);
	for (i = 0; i < poll; i++) {
		usec_delay(100);
		value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS);
		if (IXGBE_REMOVED(hw->hw_addr))
			goto out;
		if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
			goto out;
	}

out:
	/* initiate cleaning flow for buffers in the PCIe transaction layer */
	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
			gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR);

	/* Flush all writes and allow 20usec for all transactions to clear */
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(20);

	/* restore previous register values */
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
}
4647 
4648 
4649 /**
4650  * ixgbe_dcb_get_rtrup2tc_generic - read rtrup2tc reg
4651  * @hw: pointer to hardware structure
4652  * @map: pointer to u8 arr for returning map
4653  *
4654  * Read the rtrup2tc HW register and resolve its content into map
4655  **/
4656 void ixgbe_dcb_get_rtrup2tc_generic(struct ixgbe_hw *hw, u8 *map)
4657 {
4658 	u32 reg, i;
4659 
4660 	reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
4661 	for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
4662 		map[i] = IXGBE_RTRUP2TC_UP_MASK &
4663 			(reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT));
4664 	return;
4665 }
4666 
4667 void ixgbe_disable_rx_generic(struct ixgbe_hw *hw)
4668 {
4669 	u32 pfdtxgswc;
4670 	u32 rxctrl;
4671 
4672 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4673 	if (rxctrl & IXGBE_RXCTRL_RXEN) {
4674 		if (hw->mac.type != ixgbe_mac_82598EB) {
4675 			pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
4676 			if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
4677 				pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
4678 				IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
4679 				hw->mac.set_lben = TRUE;
4680 			} else {
4681 				hw->mac.set_lben = FALSE;
4682 			}
4683 		}
4684 		rxctrl &= ~IXGBE_RXCTRL_RXEN;
4685 		IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
4686 	}
4687 }
4688 
4689 void ixgbe_enable_rx_generic(struct ixgbe_hw *hw)
4690 {
4691 	u32 pfdtxgswc;
4692 	u32 rxctrl;
4693 
4694 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4695 	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, (rxctrl | IXGBE_RXCTRL_RXEN));
4696 
4697 	if (hw->mac.type != ixgbe_mac_82598EB) {
4698 		if (hw->mac.set_lben) {
4699 			pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
4700 			pfdtxgswc |= IXGBE_PFDTXGSWC_VT_LBEN;
4701 			IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
4702 			hw->mac.set_lben = FALSE;
4703 		}
4704 	}
4705 }
4706 
4707 /**
4708  * ixgbe_mng_present - returns TRUE when management capability is present
4709  * @hw: pointer to hardware structure
4710  */
4711 bool ixgbe_mng_present(struct ixgbe_hw *hw)
4712 {
4713 	u32 fwsm;
4714 
4715 	if (hw->mac.type < ixgbe_mac_82599EB)
4716 		return FALSE;
4717 
4718 	fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
4719 	fwsm &= IXGBE_FWSM_MODE_MASK;
4720 	return fwsm == IXGBE_FWSM_FW_MODE_PT;
4721 }
4722 
4723 /**
4724  * ixgbe_mng_enabled - Is the manageability engine enabled?
4725  * @hw: pointer to hardware structure
4726  *
4727  * Returns TRUE if the manageability engine is enabled.
4728  **/
4729 bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
4730 {
4731 	u32 fwsm, manc, factps;
4732 
4733 	fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
4734 	if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT)
4735 		return FALSE;
4736 
4737 	manc = IXGBE_READ_REG(hw, IXGBE_MANC);
4738 	if (!(manc & IXGBE_MANC_RCV_TCO_EN))
4739 		return FALSE;
4740 
4741 	if (hw->mac.type <= ixgbe_mac_X540) {
4742 		factps = IXGBE_READ_REG(hw, IXGBE_FACTPS);
4743 		if (factps & IXGBE_FACTPS_MNGCG)
4744 			return FALSE;
4745 	}
4746 
4747 	return TRUE;
4748 }
4749 
4750 /**
4751  *  ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
4752  *  @hw: pointer to hardware structure
4753  *  @speed: new link speed
4754  *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
4755  *
4756  *  Set the link speed in the MAC and/or PHY register and restarts link.
4757  **/
s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
					  ixgbe_link_speed speed,
					  bool autoneg_wait_to_complete)
{
	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	s32 status = IXGBE_SUCCESS;
	u32 speedcnt = 0;
	u32 i = 0;
	bool autoneg, link_up = FALSE;

	DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");

	/* Mask off requested but non-supported speeds */
	status = ixgbe_get_link_capabilities(hw, &link_speed, &autoneg);
	if (status != IXGBE_SUCCESS)
		return status;

	speed &= link_speed;

	/* Try each speed one by one, highest priority first.  We do this in
	 * software because 10Gb fiber doesn't support speed autonegotiation.
	 */
	if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
		speedcnt++;
		highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;

		/* If we already have link at this speed, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			return status;

		if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
			goto out;

		/* Set the module link speed */
		switch (hw->phy.media_type) {
		case ixgbe_media_type_fiber_fixed:
		case ixgbe_media_type_fiber:
			ixgbe_set_rate_select_speed(hw,
						    IXGBE_LINK_SPEED_10GB_FULL);
			break;
		case ixgbe_media_type_fiber_qsfp:
			/* QSFP module automatically detects MAC link speed */
			break;
		default:
			DEBUGOUT("Unexpected media type.\n");
			break;
		}

		/* Allow module to change analog characteristics (1G->10G) */
		msec_delay(40);

		status = ixgbe_setup_mac_link(hw,
					      IXGBE_LINK_SPEED_10GB_FULL,
					      autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			return status;

		/* Flap the Tx laser if it has not already been done */
		ixgbe_flap_tx_laser(hw);

		/* Wait for the controller to acquire link.  Per IEEE 802.3ap,
		 * Section 73.10.2, we may have to wait up to 500ms if KR is
		 * attempted.  82599 uses the same timing for 10g SFI.
		 */
		for (i = 0; i < 5; i++) {
			/* Wait for the link partner to also set speed */
			msec_delay(100);

			/* If we have link, just jump out */
			status = ixgbe_check_link(hw, &link_speed,
						  &link_up, FALSE);
			if (status != IXGBE_SUCCESS)
				return status;

			if (link_up)
				goto out;
		}
	}

	/* 10G failed (or was not requested); try 1G next. */
	if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
		speedcnt++;
		if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
			highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;

		/* If we already have link at this speed, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			return status;

		if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
			goto out;

		/* Set the module link speed */
		switch (hw->phy.media_type) {
		case ixgbe_media_type_fiber_fixed:
		case ixgbe_media_type_fiber:
			ixgbe_set_rate_select_speed(hw,
						    IXGBE_LINK_SPEED_1GB_FULL);
			break;
		case ixgbe_media_type_fiber_qsfp:
			/* QSFP module automatically detects link speed */
			break;
		default:
			DEBUGOUT("Unexpected media type.\n");
			break;
		}

		/* Allow module to change analog characteristics (10G->1G) */
		msec_delay(40);

		status = ixgbe_setup_mac_link(hw,
					      IXGBE_LINK_SPEED_1GB_FULL,
					      autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			return status;

		/* Flap the Tx laser if it has not already been done */
		ixgbe_flap_tx_laser(hw);

		/* Wait for the link partner to also set speed */
		msec_delay(100);

		/* If we have link, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			return status;

		if (link_up)
			goto out;
	}

	/* We didn't get link.  Configure back to the highest speed we tried,
	 * (if there was more than one).  We call ourselves back with just the
	 * single highest speed that the user requested.
	 */
	if (speedcnt > 1)
		status = ixgbe_setup_mac_link_multispeed_fiber(hw,
						      highest_link_speed,
						      autoneg_wait_to_complete);

out:
	/* Set autoneg_advertised value based on input link speed */
	hw->phy.autoneg_advertised = 0;

	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;

	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;

	return status;
}
4912 
4913 /**
4914  *  ixgbe_set_soft_rate_select_speed - Set module link speed
4915  *  @hw: pointer to hardware structure
4916  *  @speed: link speed to set
4917  *
4918  *  Set module link speed via the soft rate select.
4919  */
4920 void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw,
4921 					ixgbe_link_speed speed)
4922 {
4923 	s32 status;
4924 	u8 rs, eeprom_data;
4925 
4926 	switch (speed) {
4927 	case IXGBE_LINK_SPEED_10GB_FULL:
4928 		/* one bit mask same as setting on */
4929 		rs = IXGBE_SFF_SOFT_RS_SELECT_10G;
4930 		break;
4931 	case IXGBE_LINK_SPEED_1GB_FULL:
4932 		rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
4933 		break;
4934 	default:
4935 		DEBUGOUT("Invalid fixed module speed\n");
4936 		return;
4937 	}
4938 
4939 	/* Set RS0 */
4940 	status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
4941 					   IXGBE_I2C_EEPROM_DEV_ADDR2,
4942 					   &eeprom_data);
4943 	if (status) {
4944 		DEBUGOUT("Failed to read Rx Rate Select RS0\n");
4945 		goto out;
4946 	}
4947 
4948 	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
4949 
4950 	status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
4951 					    IXGBE_I2C_EEPROM_DEV_ADDR2,
4952 					    eeprom_data);
4953 	if (status) {
4954 		DEBUGOUT("Failed to write Rx Rate Select RS0\n");
4955 		goto out;
4956 	}
4957 
4958 	/* Set RS1 */
4959 	status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
4960 					   IXGBE_I2C_EEPROM_DEV_ADDR2,
4961 					   &eeprom_data);
4962 	if (status) {
4963 		DEBUGOUT("Failed to read Rx Rate Select RS1\n");
4964 		goto out;
4965 	}
4966 
4967 	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
4968 
4969 	status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
4970 					    IXGBE_I2C_EEPROM_DEV_ADDR2,
4971 					    eeprom_data);
4972 	if (status) {
4973 		DEBUGOUT("Failed to write Rx Rate Select RS1\n");
4974 		goto out;
4975 	}
4976 out:
4977 	return;
4978 }
4979