xref: /titanic_41/usr/src/uts/common/io/ixgbe/ixgbe_common.c (revision 69b5a878d62fdee1b12e78371ce6cc8abddcad15)
1 /******************************************************************************
2 
3   Copyright (c) 2001-2012, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD: src/sys/dev/ixgbe/ixgbe_common.c,v 1.14 2012/07/05 20:51:44 jfv Exp $*/
34 
35 #include "ixgbe_common.h"
36 #include "ixgbe_phy.h"
37 #include "ixgbe_api.h"
38 
39 static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
40 static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
41 static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
42 static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
43 static void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
44 static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
45 					u16 count);
46 static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
47 static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
48 static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
49 static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
50 
51 static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
52 static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
53 					 u16 *san_mac_offset);
54 static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
55 					     u16 words, u16 *data);
56 static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
57 					      u16 words, u16 *data);
58 static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
59 						 u16 offset);
60 
61 /**
62  *  ixgbe_init_ops_generic - Inits function ptrs
63  *  @hw: pointer to the hardware structure
64  *
65  *  Initialize the function pointers.
66  **/
67 s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
68 {
69 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
70 	struct ixgbe_mac_info *mac = &hw->mac;
71 	u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
72 
73 	DEBUGFUNC("ixgbe_init_ops_generic");
74 
75 	/* EEPROM */
76 	eeprom->ops.init_params = &ixgbe_init_eeprom_params_generic;
77 	/* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */
78 	if (eec & IXGBE_EEC_PRES) {
79 		eeprom->ops.read = &ixgbe_read_eerd_generic;
80 		eeprom->ops.read_buffer = &ixgbe_read_eerd_buffer_generic;
81 	} else {
82 		eeprom->ops.read = &ixgbe_read_eeprom_bit_bang_generic;
83 		eeprom->ops.read_buffer =
84 				 &ixgbe_read_eeprom_buffer_bit_bang_generic;
85 	}
86 	eeprom->ops.write = &ixgbe_write_eeprom_generic;
87 	eeprom->ops.write_buffer = &ixgbe_write_eeprom_buffer_bit_bang_generic;
88 	eeprom->ops.validate_checksum =
89 				      &ixgbe_validate_eeprom_checksum_generic;
90 	eeprom->ops.update_checksum = &ixgbe_update_eeprom_checksum_generic;
91 	eeprom->ops.calc_checksum = &ixgbe_calc_eeprom_checksum_generic;
92 
93 	/* MAC */
94 	mac->ops.init_hw = &ixgbe_init_hw_generic;
95 	mac->ops.reset_hw = NULL;
96 	mac->ops.start_hw = &ixgbe_start_hw_generic;
97 	mac->ops.clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic;
98 	mac->ops.get_media_type = NULL;
99 	mac->ops.get_supported_physical_layer = NULL;
100 	mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_generic;
101 	mac->ops.get_mac_addr = &ixgbe_get_mac_addr_generic;
102 	mac->ops.stop_adapter = &ixgbe_stop_adapter_generic;
103 	mac->ops.get_bus_info = &ixgbe_get_bus_info_generic;
104 	mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie;
105 	mac->ops.acquire_swfw_sync = &ixgbe_acquire_swfw_sync;
106 	mac->ops.release_swfw_sync = &ixgbe_release_swfw_sync;
107 
108 	/* LEDs */
109 	mac->ops.led_on = &ixgbe_led_on_generic;
110 	mac->ops.led_off = &ixgbe_led_off_generic;
111 	mac->ops.blink_led_start = &ixgbe_blink_led_start_generic;
112 	mac->ops.blink_led_stop = &ixgbe_blink_led_stop_generic;
113 
114 	/* RAR, Multicast, VLAN */
115 	mac->ops.set_rar = &ixgbe_set_rar_generic;
116 	mac->ops.clear_rar = &ixgbe_clear_rar_generic;
117 	mac->ops.insert_mac_addr = NULL;
118 	mac->ops.set_vmdq = NULL;
119 	mac->ops.clear_vmdq = NULL;
120 	mac->ops.init_rx_addrs = &ixgbe_init_rx_addrs_generic;
121 	mac->ops.update_uc_addr_list = &ixgbe_update_uc_addr_list_generic;
122 	mac->ops.update_mc_addr_list = &ixgbe_update_mc_addr_list_generic;
123 	mac->ops.enable_mc = &ixgbe_enable_mc_generic;
124 	mac->ops.disable_mc = &ixgbe_disable_mc_generic;
125 	mac->ops.clear_vfta = NULL;
126 	mac->ops.set_vfta = NULL;
127 	mac->ops.set_vlvf = NULL;
128 	mac->ops.init_uta_tables = NULL;
129 
130 	/* Flow Control */
131 	mac->ops.fc_enable = &ixgbe_fc_enable_generic;
132 
133 	/* Link */
134 	mac->ops.get_link_capabilities = NULL;
135 	mac->ops.setup_link = NULL;
136 	mac->ops.check_link = NULL;
137 
138 	return IXGBE_SUCCESS;
139 }
140 
141 /**
142  *  ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow
143  *  control
144  *  @hw: pointer to hardware structure
145  *
146  *  There are several phys that do not support autoneg flow control. This
147  *  function check the device id to see if the associated phy supports
148  *  autoneg flow control.
149  **/
150 static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
151 {
152 
153 	DEBUGFUNC("ixgbe_device_supports_autoneg_fc");
154 
155 	switch (hw->device_id) {
156 	case IXGBE_DEV_ID_X540T:
157 	case IXGBE_DEV_ID_X540T1:
158 		return IXGBE_SUCCESS;
159 	case IXGBE_DEV_ID_82599_T3_LOM:
160 		return IXGBE_SUCCESS;
161 	default:
162 		return IXGBE_ERR_FC_NOT_SUPPORTED;
163 	}
164 }
165 
166 /**
167  *  ixgbe_setup_fc - Set up flow control
168  *  @hw: pointer to hardware structure
169  *
170  *  Called at init time to set up flow control.
171  **/
172 static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
173 {
174 	s32 ret_val = IXGBE_SUCCESS;
175 	u32 reg = 0, reg_bp = 0;
176 	u16 reg_cu = 0;
177 
178 	DEBUGFUNC("ixgbe_setup_fc");
179 
180 	/*
181 	 * Validate the requested mode.  Strict IEEE mode does not allow
182 	 * ixgbe_fc_rx_pause because it will cause us to fail at UNH.
183 	 */
184 	if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
185 		DEBUGOUT("ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
186 		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
187 		goto out;
188 	}
189 
190 	/*
191 	 * 10gig parts do not have a word in the EEPROM to determine the
192 	 * default flow control setting, so we explicitly set it to full.
193 	 */
194 	if (hw->fc.requested_mode == ixgbe_fc_default)
195 		hw->fc.requested_mode = ixgbe_fc_full;
196 
197 	/*
198 	 * Set up the 1G and 10G flow control advertisement registers so the
199 	 * HW will be able to do fc autoneg once the cable is plugged in.  If
200 	 * we link at 10G, the 1G advertisement is harmless and vice versa.
201 	 */
202 	switch (hw->phy.media_type) {
203 	case ixgbe_media_type_fiber:
204 	case ixgbe_media_type_backplane:
205 		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
206 		reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC);
207 		break;
208 	case ixgbe_media_type_copper:
209 		hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
210 				     IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg_cu);
211 		break;
212 	default:
213 		break;
214 	}
215 
216 	/*
217 	 * The possible values of fc.requested_mode are:
218 	 * 0: Flow control is completely disabled
219 	 * 1: Rx flow control is enabled (we can receive pause frames,
220 	 *    but not send pause frames).
221 	 * 2: Tx flow control is enabled (we can send pause frames but
222 	 *    we do not support receiving pause frames).
223 	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
224 	 * other: Invalid.
225 	 */
226 	switch (hw->fc.requested_mode) {
227 	case ixgbe_fc_none:
228 		/* Flow control completely disabled by software override. */
229 		reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
230 		if (hw->phy.media_type == ixgbe_media_type_backplane)
231 			reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
232 				    IXGBE_AUTOC_ASM_PAUSE);
233 		else if (hw->phy.media_type == ixgbe_media_type_copper)
234 			reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
235 		break;
236 	case ixgbe_fc_tx_pause:
237 		/*
238 		 * Tx Flow control is enabled, and Rx Flow control is
239 		 * disabled by software override.
240 		 */
241 		reg |= IXGBE_PCS1GANA_ASM_PAUSE;
242 		reg &= ~IXGBE_PCS1GANA_SYM_PAUSE;
243 		if (hw->phy.media_type == ixgbe_media_type_backplane) {
244 			reg_bp |= IXGBE_AUTOC_ASM_PAUSE;
245 			reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE;
246 		} else if (hw->phy.media_type == ixgbe_media_type_copper) {
247 			reg_cu |= IXGBE_TAF_ASM_PAUSE;
248 			reg_cu &= ~IXGBE_TAF_SYM_PAUSE;
249 		}
250 		break;
251 	case ixgbe_fc_rx_pause:
252 		/*
253 		 * Rx Flow control is enabled and Tx Flow control is
254 		 * disabled by software override. Since there really
255 		 * isn't a way to advertise that we are capable of RX
256 		 * Pause ONLY, we will advertise that we support both
257 		 * symmetric and asymmetric Rx PAUSE, as such we fall
258 		 * through to the fc_full statement.  Later, we will
259 		 * disable the adapter's ability to send PAUSE frames.
260 		 */
261 	case ixgbe_fc_full:
262 		/* Flow control (both Rx and Tx) is enabled by SW override. */
263 		reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE;
264 		if (hw->phy.media_type == ixgbe_media_type_backplane)
265 			reg_bp |= IXGBE_AUTOC_SYM_PAUSE |
266 				  IXGBE_AUTOC_ASM_PAUSE;
267 		else if (hw->phy.media_type == ixgbe_media_type_copper)
268 			reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
269 		break;
270 	default:
271 		DEBUGOUT("Flow control param set incorrectly\n");
272 		ret_val = IXGBE_ERR_CONFIG;
273 		goto out;
274 	}
275 
276 	if (hw->mac.type != ixgbe_mac_X540) {
277 		/*
278 		 * Enable auto-negotiation between the MAC & PHY;
279 		 * the MAC will advertise clause 37 flow control.
280 		 */
281 		IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
282 		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
283 
284 		/* Disable AN timeout */
285 		if (hw->fc.strict_ieee)
286 			reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
287 
288 		IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
289 		DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
290 	}
291 
292 	/*
293 	 * AUTOC restart handles negotiation of 1G and 10G on backplane
294 	 * and copper. There is no need to set the PCS1GCTL register.
295 	 *
296 	 */
297 	if (hw->phy.media_type == ixgbe_media_type_backplane) {
298 		reg_bp |= IXGBE_AUTOC_AN_RESTART;
299 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
300 	} else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
301 		    (ixgbe_device_supports_autoneg_fc(hw) == IXGBE_SUCCESS)) {
302 		hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
303 				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu);
304 	}
305 
306 	DEBUGOUT1("Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
307 out:
308 	return ret_val;
309 }
310 
311 /**
312  *  ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
313  *  @hw: pointer to hardware structure
314  *
315  *  Starts the hardware by filling the bus info structure and media type, clears
316  *  all on chip counters, initializes receive address registers, multicast
317  *  table, VLAN filter table, calls routine to set up link and flow control
318  *  settings, and leaves transmit and receive units disabled and uninitialized
319  **/
320 s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
321 {
322 	s32 ret_val;
323 	u32 ctrl_ext;
324 
325 	DEBUGFUNC("ixgbe_start_hw_generic");
326 
327 	/* Set the media type */
328 	hw->phy.media_type = hw->mac.ops.get_media_type(hw);
329 
330 	/* PHY ops initialization must be done in reset_hw() */
331 
332 	/* Clear the VLAN filter table */
333 	hw->mac.ops.clear_vfta(hw);
334 
335 	/* Clear statistics registers */
336 	hw->mac.ops.clear_hw_cntrs(hw);
337 
338 	/* Set No Snoop Disable */
339 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
340 	ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
341 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
342 	IXGBE_WRITE_FLUSH(hw);
343 
344 	/* Setup flow control */
345 	ret_val = ixgbe_setup_fc(hw);
346 	if (ret_val != IXGBE_SUCCESS)
347 		goto out;
348 
349 	/* Clear adapter stopped flag */
350 	hw->adapter_stopped = FALSE;
351 
352 out:
353 	return ret_val;
354 }
355 
356 /**
357  *  ixgbe_start_hw_gen2 - Init sequence for common device family
358  *  @hw: pointer to hw structure
359  *
360  * Performs the init sequence common to the second generation
361  * of 10 GbE devices.
362  * Devices in the second generation:
363  *     82599
364  *     X540
365  **/
366 s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
367 {
368 	u32 i;
369 	u32 regval;
370 
371 	/* Clear the rate limiters */
372 	for (i = 0; i < hw->mac.max_tx_queues; i++) {
373 		IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
374 		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
375 	}
376 	IXGBE_WRITE_FLUSH(hw);
377 
378 	/* Disable relaxed ordering */
379 	for (i = 0; i < hw->mac.max_tx_queues; i++) {
380 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
381 		regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
382 		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
383 	}
384 
385 	for (i = 0; i < hw->mac.max_rx_queues; i++) {
386 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
387 		regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
388 			    IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
389 		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
390 	}
391 
392 	return IXGBE_SUCCESS;
393 }
394 
395 /**
396  *  ixgbe_init_hw_generic - Generic hardware initialization
397  *  @hw: pointer to hardware structure
398  *
399  *  Initialize the hardware by resetting the hardware, filling the bus info
400  *  structure and media type, clears all on chip counters, initializes receive
401  *  address registers, multicast table, VLAN filter table, calls routine to set
402  *  up link and flow control settings, and leaves transmit and receive units
403  *  disabled and uninitialized
404  **/
405 s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
406 {
407 	s32 status;
408 
409 	DEBUGFUNC("ixgbe_init_hw_generic");
410 
411 	/* Reset the hardware */
412 	status = hw->mac.ops.reset_hw(hw);
413 
414 	if (status == IXGBE_SUCCESS) {
415 		/* Start the HW */
416 		status = hw->mac.ops.start_hw(hw);
417 	}
418 
419 	return status;
420 }
421 
422 /**
423  *  ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
424  *  @hw: pointer to hardware structure
425  *
426  *  Clears all hardware statistics counters by reading them from the hardware
427  *  Statistics counters are clear on read.
428  **/
429 s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
430 {
431 	u16 i = 0;
432 
433 	DEBUGFUNC("ixgbe_clear_hw_cntrs_generic");
434 
435 	IXGBE_READ_REG(hw, IXGBE_CRCERRS);
436 	IXGBE_READ_REG(hw, IXGBE_ILLERRC);
437 	IXGBE_READ_REG(hw, IXGBE_ERRBC);
438 	IXGBE_READ_REG(hw, IXGBE_MSPDC);
439 	for (i = 0; i < 8; i++)
440 		IXGBE_READ_REG(hw, IXGBE_MPC(i));
441 
442 	IXGBE_READ_REG(hw, IXGBE_MLFC);
443 	IXGBE_READ_REG(hw, IXGBE_MRFC);
444 	IXGBE_READ_REG(hw, IXGBE_RLEC);
445 	IXGBE_READ_REG(hw, IXGBE_LXONTXC);
446 	IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
447 	if (hw->mac.type >= ixgbe_mac_82599EB) {
448 		IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
449 		IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
450 	} else {
451 		IXGBE_READ_REG(hw, IXGBE_LXONRXC);
452 		IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
453 	}
454 
455 	for (i = 0; i < 8; i++) {
456 		IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
457 		IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
458 		if (hw->mac.type >= ixgbe_mac_82599EB) {
459 			IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
460 			IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
461 		} else {
462 			IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
463 			IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
464 		}
465 	}
466 	if (hw->mac.type >= ixgbe_mac_82599EB)
467 		for (i = 0; i < 8; i++)
468 			IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
469 	IXGBE_READ_REG(hw, IXGBE_PRC64);
470 	IXGBE_READ_REG(hw, IXGBE_PRC127);
471 	IXGBE_READ_REG(hw, IXGBE_PRC255);
472 	IXGBE_READ_REG(hw, IXGBE_PRC511);
473 	IXGBE_READ_REG(hw, IXGBE_PRC1023);
474 	IXGBE_READ_REG(hw, IXGBE_PRC1522);
475 	IXGBE_READ_REG(hw, IXGBE_GPRC);
476 	IXGBE_READ_REG(hw, IXGBE_BPRC);
477 	IXGBE_READ_REG(hw, IXGBE_MPRC);
478 	IXGBE_READ_REG(hw, IXGBE_GPTC);
479 	IXGBE_READ_REG(hw, IXGBE_GORCL);
480 	IXGBE_READ_REG(hw, IXGBE_GORCH);
481 	IXGBE_READ_REG(hw, IXGBE_GOTCL);
482 	IXGBE_READ_REG(hw, IXGBE_GOTCH);
483 	if (hw->mac.type == ixgbe_mac_82598EB)
484 		for (i = 0; i < 8; i++)
485 			IXGBE_READ_REG(hw, IXGBE_RNBC(i));
486 	IXGBE_READ_REG(hw, IXGBE_RUC);
487 	IXGBE_READ_REG(hw, IXGBE_RFC);
488 	IXGBE_READ_REG(hw, IXGBE_ROC);
489 	IXGBE_READ_REG(hw, IXGBE_RJC);
490 	IXGBE_READ_REG(hw, IXGBE_MNGPRC);
491 	IXGBE_READ_REG(hw, IXGBE_MNGPDC);
492 	IXGBE_READ_REG(hw, IXGBE_MNGPTC);
493 	IXGBE_READ_REG(hw, IXGBE_TORL);
494 	IXGBE_READ_REG(hw, IXGBE_TORH);
495 	IXGBE_READ_REG(hw, IXGBE_TPR);
496 	IXGBE_READ_REG(hw, IXGBE_TPT);
497 	IXGBE_READ_REG(hw, IXGBE_PTC64);
498 	IXGBE_READ_REG(hw, IXGBE_PTC127);
499 	IXGBE_READ_REG(hw, IXGBE_PTC255);
500 	IXGBE_READ_REG(hw, IXGBE_PTC511);
501 	IXGBE_READ_REG(hw, IXGBE_PTC1023);
502 	IXGBE_READ_REG(hw, IXGBE_PTC1522);
503 	IXGBE_READ_REG(hw, IXGBE_MPTC);
504 	IXGBE_READ_REG(hw, IXGBE_BPTC);
505 	for (i = 0; i < 16; i++) {
506 		IXGBE_READ_REG(hw, IXGBE_QPRC(i));
507 		IXGBE_READ_REG(hw, IXGBE_QPTC(i));
508 		if (hw->mac.type >= ixgbe_mac_82599EB) {
509 			IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
510 			IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
511 			IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
512 			IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
513 			IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
514 		} else {
515 			IXGBE_READ_REG(hw, IXGBE_QBRC(i));
516 			IXGBE_READ_REG(hw, IXGBE_QBTC(i));
517 		}
518 	}
519 
520 	if (hw->mac.type == ixgbe_mac_X540) {
521 		if (hw->phy.id == 0)
522 			ixgbe_identify_phy(hw);
523 		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL,
524 				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
525 		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH,
526 				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
527 		hw->phy.ops.read_reg(hw, IXGBE_LDPCECL,
528 				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
529 		hw->phy.ops.read_reg(hw, IXGBE_LDPCECH,
530 				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
531 	}
532 
533 	return IXGBE_SUCCESS;
534 }
535 
536 /**
537  *  ixgbe_read_pba_string_generic - Reads part number string from EEPROM
538  *  @hw: pointer to hardware structure
539  *  @pba_num: stores the part number string from the EEPROM
540  *  @pba_num_size: part number string buffer length
541  *
542  *  Reads the part number string from the EEPROM.
543  **/
544 s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
545 				  u32 pba_num_size)
546 {
547 	s32 ret_val;
548 	u16 data;
549 	u16 pba_ptr;
550 	u16 offset;
551 	u16 length;
552 
553 	DEBUGFUNC("ixgbe_read_pba_string_generic");
554 
555 	if (pba_num == NULL) {
556 		DEBUGOUT("PBA string buffer was null\n");
557 		return IXGBE_ERR_INVALID_ARGUMENT;
558 	}
559 
560 	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
561 	if (ret_val) {
562 		DEBUGOUT("NVM Read Error\n");
563 		return ret_val;
564 	}
565 
566 	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
567 	if (ret_val) {
568 		DEBUGOUT("NVM Read Error\n");
569 		return ret_val;
570 	}
571 
572 	/*
573 	 * if data is not ptr guard the PBA must be in legacy format which
574 	 * means pba_ptr is actually our second data word for the PBA number
575 	 * and we can decode it into an ascii string
576 	 */
577 	if (data != IXGBE_PBANUM_PTR_GUARD) {
578 		DEBUGOUT("NVM PBA number is not stored as string\n");
579 
580 		/* we will need 11 characters to store the PBA */
581 		if (pba_num_size < 11) {
582 			DEBUGOUT("PBA string buffer too small\n");
583 			return IXGBE_ERR_NO_SPACE;
584 		}
585 
586 		/* extract hex string from data and pba_ptr */
587 		pba_num[0] = (data >> 12) & 0xF;
588 		pba_num[1] = (data >> 8) & 0xF;
589 		pba_num[2] = (data >> 4) & 0xF;
590 		pba_num[3] = data & 0xF;
591 		pba_num[4] = (pba_ptr >> 12) & 0xF;
592 		pba_num[5] = (pba_ptr >> 8) & 0xF;
593 		pba_num[6] = '-';
594 		pba_num[7] = 0;
595 		pba_num[8] = (pba_ptr >> 4) & 0xF;
596 		pba_num[9] = pba_ptr & 0xF;
597 
598 		/* put a null character on the end of our string */
599 		pba_num[10] = '\0';
600 
601 		/* switch all the data but the '-' to hex char */
602 		for (offset = 0; offset < 10; offset++) {
603 			if (pba_num[offset] < 0xA)
604 				pba_num[offset] += '0';
605 			else if (pba_num[offset] < 0x10)
606 				pba_num[offset] += 'A' - 0xA;
607 		}
608 
609 		return IXGBE_SUCCESS;
610 	}
611 
612 	ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
613 	if (ret_val) {
614 		DEBUGOUT("NVM Read Error\n");
615 		return ret_val;
616 	}
617 
618 	if (length == 0xFFFF || length == 0) {
619 		DEBUGOUT("NVM PBA number section invalid length\n");
620 		return IXGBE_ERR_PBA_SECTION;
621 	}
622 
623 	/* check if pba_num buffer is big enough */
624 	if (pba_num_size  < (((u32)length * 2) - 1)) {
625 		DEBUGOUT("PBA string buffer too small\n");
626 		return IXGBE_ERR_NO_SPACE;
627 	}
628 
629 	/* trim pba length from start of string */
630 	pba_ptr++;
631 	length--;
632 
633 	for (offset = 0; offset < length; offset++) {
634 		ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
635 		if (ret_val) {
636 			DEBUGOUT("NVM Read Error\n");
637 			return ret_val;
638 		}
639 		pba_num[offset * 2] = (u8)(data >> 8);
640 		pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
641 	}
642 	pba_num[offset * 2] = '\0';
643 
644 	return IXGBE_SUCCESS;
645 }
646 
647 /**
648  *  ixgbe_read_pba_num_generic - Reads part number from EEPROM
649  *  @hw: pointer to hardware structure
650  *  @pba_num: stores the part number from the EEPROM
651  *
652  *  Reads the part number from the EEPROM.
653  **/
654 s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num)
655 {
656 	s32 ret_val;
657 	u16 data;
658 
659 	DEBUGFUNC("ixgbe_read_pba_num_generic");
660 
661 	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
662 	if (ret_val) {
663 		DEBUGOUT("NVM Read Error\n");
664 		return ret_val;
665 	} else if (data == IXGBE_PBANUM_PTR_GUARD) {
666 		DEBUGOUT("NVM Not supported\n");
667 		return IXGBE_NOT_IMPLEMENTED;
668 	}
669 	*pba_num = (u32)(data << 16);
670 
671 	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data);
672 	if (ret_val) {
673 		DEBUGOUT("NVM Read Error\n");
674 		return ret_val;
675 	}
676 	*pba_num |= data;
677 
678 	return IXGBE_SUCCESS;
679 }
680 
681 /**
682  *  ixgbe_get_mac_addr_generic - Generic get MAC address
683  *  @hw: pointer to hardware structure
684  *  @mac_addr: Adapter MAC address
685  *
686  *  Reads the adapter's MAC address from first Receive Address Register (RAR0)
687  *  A reset of the adapter must be performed prior to calling this function
688  *  in order for the MAC address to have been loaded from the EEPROM into RAR0
689  **/
690 s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
691 {
692 	u32 rar_high;
693 	u32 rar_low;
694 	u16 i;
695 
696 	DEBUGFUNC("ixgbe_get_mac_addr_generic");
697 
698 	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
699 	rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));
700 
701 	for (i = 0; i < 4; i++)
702 		mac_addr[i] = (u8)(rar_low >> (i*8));
703 
704 	for (i = 0; i < 2; i++)
705 		mac_addr[i+4] = (u8)(rar_high >> (i*8));
706 
707 	return IXGBE_SUCCESS;
708 }
709 
710 /**
711  *  ixgbe_get_bus_info_generic - Generic set PCI bus info
712  *  @hw: pointer to hardware structure
713  *
714  *  Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure
715  **/
716 s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
717 {
718 	struct ixgbe_mac_info *mac = &hw->mac;
719 	u16 link_status;
720 
721 	DEBUGFUNC("ixgbe_get_bus_info_generic");
722 
723 	hw->bus.type = ixgbe_bus_type_pci_express;
724 
725 	/* Get the negotiated link width and speed from PCI config space */
726 	link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);
727 
728 	switch (link_status & IXGBE_PCI_LINK_WIDTH) {
729 	case IXGBE_PCI_LINK_WIDTH_1:
730 		hw->bus.width = ixgbe_bus_width_pcie_x1;
731 		break;
732 	case IXGBE_PCI_LINK_WIDTH_2:
733 		hw->bus.width = ixgbe_bus_width_pcie_x2;
734 		break;
735 	case IXGBE_PCI_LINK_WIDTH_4:
736 		hw->bus.width = ixgbe_bus_width_pcie_x4;
737 		break;
738 	case IXGBE_PCI_LINK_WIDTH_8:
739 		hw->bus.width = ixgbe_bus_width_pcie_x8;
740 		break;
741 	default:
742 		hw->bus.width = ixgbe_bus_width_unknown;
743 		break;
744 	}
745 
746 	switch (link_status & IXGBE_PCI_LINK_SPEED) {
747 	case IXGBE_PCI_LINK_SPEED_2500:
748 		hw->bus.speed = ixgbe_bus_speed_2500;
749 		break;
750 	case IXGBE_PCI_LINK_SPEED_5000:
751 		hw->bus.speed = ixgbe_bus_speed_5000;
752 		break;
753 	case IXGBE_PCI_LINK_SPEED_8000:
754 		hw->bus.speed = ixgbe_bus_speed_8000;
755 		break;
756 	default:
757 		hw->bus.speed = ixgbe_bus_speed_unknown;
758 		break;
759 	}
760 
761 	mac->ops.set_lan_id(hw);
762 
763 	return IXGBE_SUCCESS;
764 }
765 
766 /**
767  *  ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
768  *  @hw: pointer to the HW structure
769  *
770  *  Determines the LAN function id by reading memory-mapped registers
771  *  and swaps the port value if requested.
772  **/
773 void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
774 {
775 	struct ixgbe_bus_info *bus = &hw->bus;
776 	u32 reg;
777 
778 	DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie");
779 
780 	reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
781 	bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
782 	bus->lan_id = bus->func;
783 
784 	/* check for a port swap */
785 	reg = IXGBE_READ_REG(hw, IXGBE_FACTPS);
786 	if (reg & IXGBE_FACTPS_LFS)
787 		bus->func ^= 0x1;
788 }
789 
790 /**
791  *  ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
792  *  @hw: pointer to hardware structure
793  *
794  *  Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
795  *  disables transmit and receive units. The adapter_stopped flag is used by
796  *  the shared code and drivers to determine if the adapter is in a stopped
797  *  state and should not touch the hardware.
798  **/
799 s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
800 {
801 	u32 reg_val;
802 	u16 i;
803 
804 	DEBUGFUNC("ixgbe_stop_adapter_generic");
805 
806 	/*
807 	 * Set the adapter_stopped flag so other driver functions stop touching
808 	 * the hardware
809 	 */
810 	hw->adapter_stopped = TRUE;
811 
812 	/* Disable the receive unit */
813 	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, 0);
814 
815 	/* Clear interrupt mask to stop interrupts from being generated */
816 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
817 
818 	/* Clear any pending interrupts, flush previous writes */
819 	IXGBE_READ_REG(hw, IXGBE_EICR);
820 
821 	/* Disable the transmit unit.  Each queue must be disabled. */
822 	for (i = 0; i < hw->mac.max_tx_queues; i++)
823 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH);
824 
825 	/* Disable the receive unit by stopping each queue */
826 	for (i = 0; i < hw->mac.max_rx_queues; i++) {
827 		reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
828 		reg_val &= ~IXGBE_RXDCTL_ENABLE;
829 		reg_val |= IXGBE_RXDCTL_SWFLSH;
830 		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
831 	}
832 
833 	/* flush all queues disables */
834 	IXGBE_WRITE_FLUSH(hw);
835 	msec_delay(2);
836 
837 	/*
838 	 * Prevent the PCI-E bus from from hanging by disabling PCI-E master
839 	 * access and verify no pending requests
840 	 */
841 	return ixgbe_disable_pcie_master(hw);
842 }
843 
844 /**
845  *  ixgbe_led_on_generic - Turns on the software controllable LEDs.
846  *  @hw: pointer to hardware structure
847  *  @index: led number to turn on
848  **/
849 s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
850 {
851 	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
852 
853 	DEBUGFUNC("ixgbe_led_on_generic");
854 
855 	/* To turn on the LED, set mode to ON. */
856 	led_reg &= ~IXGBE_LED_MODE_MASK(index);
857 	led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
858 	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
859 	IXGBE_WRITE_FLUSH(hw);
860 
861 	return IXGBE_SUCCESS;
862 }
863 
864 /**
865  *  ixgbe_led_off_generic - Turns off the software controllable LEDs.
866  *  @hw: pointer to hardware structure
867  *  @index: led number to turn off
868  **/
869 s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
870 {
871 	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
872 
873 	DEBUGFUNC("ixgbe_led_off_generic");
874 
875 	/* To turn off the LED, set mode to OFF. */
876 	led_reg &= ~IXGBE_LED_MODE_MASK(index);
877 	led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
878 	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
879 	IXGBE_WRITE_FLUSH(hw);
880 
881 	return IXGBE_SUCCESS;
882 }
883 
884 /**
885  *  ixgbe_init_eeprom_params_generic - Initialize EEPROM params
886  *  @hw: pointer to hardware structure
887  *
888  *  Initializes the EEPROM parameters ixgbe_eeprom_info within the
889  *  ixgbe_hw struct in order to set up EEPROM access.
890  **/
891 s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
892 {
893 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
894 	u32 eec;
895 	u16 eeprom_size;
896 
897 	DEBUGFUNC("ixgbe_init_eeprom_params_generic");
898 
899 	if (eeprom->type == ixgbe_eeprom_uninitialized) {
900 		eeprom->type = ixgbe_eeprom_none;
901 		/* Set default semaphore delay to 10ms which is a well
902 		 * tested value */
903 		eeprom->semaphore_delay = 10;
904 		/* Clear EEPROM page size, it will be initialized as needed */
905 		eeprom->word_page_size = 0;
906 
907 		/*
908 		 * Check for EEPROM present first.
909 		 * If not present leave as none
910 		 */
911 		eec = IXGBE_READ_REG(hw, IXGBE_EEC);
912 		if (eec & IXGBE_EEC_PRES) {
913 			eeprom->type = ixgbe_eeprom_spi;
914 
915 			/*
916 			 * SPI EEPROM is assumed here.  This code would need to
917 			 * change if a future EEPROM is not SPI.
918 			 */
919 			eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
920 					    IXGBE_EEC_SIZE_SHIFT);
921 			eeprom->word_size = 1 << (eeprom_size +
922 					     IXGBE_EEPROM_WORD_SIZE_SHIFT);
923 		}
924 
925 		if (eec & IXGBE_EEC_ADDR_SIZE)
926 			eeprom->address_bits = 16;
927 		else
928 			eeprom->address_bits = 8;
929 		DEBUGOUT3("Eeprom params: type = %d, size = %d, address bits: "
930 			  "%d\n", eeprom->type, eeprom->word_size,
931 			  eeprom->address_bits);
932 	}
933 
934 	return IXGBE_SUCCESS;
935 }
936 
937 /**
938  *  ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang
939  *  @hw: pointer to hardware structure
940  *  @offset: offset within the EEPROM to write
941  *  @words: number of word(s)
942  *  @data: 16 bit word(s) to write to EEPROM
943  *
944  *  Reads 16 bit word(s) from EEPROM through bit-bang method
945  **/
946 s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
947 					       u16 words, u16 *data)
948 {
949 	s32 status = IXGBE_SUCCESS;
950 	u16 i, count;
951 
952 	DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang_generic");
953 
954 	hw->eeprom.ops.init_params(hw);
955 
956 	if (words == 0) {
957 		status = IXGBE_ERR_INVALID_ARGUMENT;
958 		goto out;
959 	}
960 
961 	if (offset + words > hw->eeprom.word_size) {
962 		status = IXGBE_ERR_EEPROM;
963 		goto out;
964 	}
965 
966 	/*
967 	 * The EEPROM page size cannot be queried from the chip. We do lazy
968 	 * initialization. It is worth to do that when we write large buffer.
969 	 */
970 	if ((hw->eeprom.word_page_size == 0) &&
971 	    (words > IXGBE_EEPROM_PAGE_SIZE_MAX))
972 		ixgbe_detect_eeprom_page_size_generic(hw, offset);
973 
974 	/*
975 	 * We cannot hold synchronization semaphores for too long
976 	 * to avoid other entity starvation. However it is more efficient
977 	 * to read in bursts than synchronizing access for each word.
978 	 */
979 	for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
980 		count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
981 			IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
982 		status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i,
983 							    count, &data[i]);
984 
985 		if (status != IXGBE_SUCCESS)
986 			break;
987 	}
988 
989 out:
990 	return status;
991 }
992 
993 /**
994  *  ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM
995  *  @hw: pointer to hardware structure
996  *  @offset: offset within the EEPROM to be written to
997  *  @words: number of word(s)
998  *  @data: 16 bit word(s) to be written to the EEPROM
999  *
1000  *  If ixgbe_eeprom_update_checksum is not called after this function, the
1001  *  EEPROM will most likely contain an invalid checksum.
1002  **/
1003 static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
1004 					      u16 words, u16 *data)
1005 {
1006 	s32 status;
1007 	u16 word;
1008 	u16 page_size;
1009 	u16 i;
1010 	u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;
1011 
1012 	DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang");
1013 
1014 	/* Prepare the EEPROM for writing  */
1015 	status = ixgbe_acquire_eeprom(hw);
1016 
1017 	if (status == IXGBE_SUCCESS) {
1018 		if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
1019 			ixgbe_release_eeprom(hw);
1020 			status = IXGBE_ERR_EEPROM;
1021 		}
1022 	}
1023 
1024 	if (status == IXGBE_SUCCESS) {
1025 		for (i = 0; i < words; i++) {
1026 			ixgbe_standby_eeprom(hw);
1027 
1028 			/*  Send the WRITE ENABLE command (8 bit opcode )  */
1029 			ixgbe_shift_out_eeprom_bits(hw,
1030 						   IXGBE_EEPROM_WREN_OPCODE_SPI,
1031 						   IXGBE_EEPROM_OPCODE_BITS);
1032 
1033 			ixgbe_standby_eeprom(hw);
1034 
1035 			/*
1036 			 * Some SPI eeproms use the 8th address bit embedded
1037 			 * in the opcode
1038 			 */
1039 			if ((hw->eeprom.address_bits == 8) &&
1040 			    ((offset + i) >= 128))
1041 				write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
1042 
1043 			/* Send the Write command (8-bit opcode + addr) */
1044 			ixgbe_shift_out_eeprom_bits(hw, write_opcode,
1045 						    IXGBE_EEPROM_OPCODE_BITS);
1046 			ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
1047 						    hw->eeprom.address_bits);
1048 
1049 			page_size = hw->eeprom.word_page_size;
1050 
1051 			/* Send the data in burst via SPI*/
1052 			do {
1053 				word = data[i];
1054 				word = (word >> 8) | (word << 8);
1055 				ixgbe_shift_out_eeprom_bits(hw, word, 16);
1056 
1057 				if (page_size == 0)
1058 					break;
1059 
1060 				/* do not wrap around page */
1061 				if (((offset + i) & (page_size - 1)) ==
1062 				    (page_size - 1))
1063 					break;
1064 			} while (++i < words);
1065 
1066 			ixgbe_standby_eeprom(hw);
1067 			msec_delay(10);
1068 		}
1069 		/* Done with writing - release the EEPROM */
1070 		ixgbe_release_eeprom(hw);
1071 	}
1072 
1073 	return status;
1074 }
1075 
1076 /**
1077  *  ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
1078  *  @hw: pointer to hardware structure
1079  *  @offset: offset within the EEPROM to be written to
1080  *  @data: 16 bit word to be written to the EEPROM
1081  *
1082  *  If ixgbe_eeprom_update_checksum is not called after this function, the
1083  *  EEPROM will most likely contain an invalid checksum.
1084  **/
1085 s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
1086 {
1087 	s32 status;
1088 
1089 	DEBUGFUNC("ixgbe_write_eeprom_generic");
1090 
1091 	hw->eeprom.ops.init_params(hw);
1092 
1093 	if (offset >= hw->eeprom.word_size) {
1094 		status = IXGBE_ERR_EEPROM;
1095 		goto out;
1096 	}
1097 
1098 	status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
1099 
1100 out:
1101 	return status;
1102 }
1103 
1104 /**
1105  *  ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang
1106  *  @hw: pointer to hardware structure
1107  *  @offset: offset within the EEPROM to be read
1108  *  @data: read 16 bit words(s) from EEPROM
1109  *  @words: number of word(s)
1110  *
1111  *  Reads 16 bit word(s) from EEPROM through bit-bang method
1112  **/
1113 s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1114 					      u16 words, u16 *data)
1115 {
1116 	s32 status = IXGBE_SUCCESS;
1117 	u16 i, count;
1118 
1119 	DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang_generic");
1120 
1121 	hw->eeprom.ops.init_params(hw);
1122 
1123 	if (words == 0) {
1124 		status = IXGBE_ERR_INVALID_ARGUMENT;
1125 		goto out;
1126 	}
1127 
1128 	if (offset + words > hw->eeprom.word_size) {
1129 		status = IXGBE_ERR_EEPROM;
1130 		goto out;
1131 	}
1132 
1133 	/*
1134 	 * We cannot hold synchronization semaphores for too long
1135 	 * to avoid other entity starvation. However it is more efficient
1136 	 * to read in bursts than synchronizing access for each word.
1137 	 */
1138 	for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
1139 		count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
1140 			IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
1141 
1142 		status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i,
1143 							   count, &data[i]);
1144 
1145 		if (status != IXGBE_SUCCESS)
1146 			break;
1147 	}
1148 
1149 out:
1150 	return status;
1151 }
1152 
1153 /**
1154  *  ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang
1155  *  @hw: pointer to hardware structure
1156  *  @offset: offset within the EEPROM to be read
1157  *  @words: number of word(s)
1158  *  @data: read 16 bit word(s) from EEPROM
1159  *
1160  *  Reads 16 bit word(s) from EEPROM through bit-bang method
1161  **/
1162 static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
1163 					     u16 words, u16 *data)
1164 {
1165 	s32 status;
1166 	u16 word_in;
1167 	u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
1168 	u16 i;
1169 
1170 	DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang");
1171 
1172 	/* Prepare the EEPROM for reading  */
1173 	status = ixgbe_acquire_eeprom(hw);
1174 
1175 	if (status == IXGBE_SUCCESS) {
1176 		if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
1177 			ixgbe_release_eeprom(hw);
1178 			status = IXGBE_ERR_EEPROM;
1179 		}
1180 	}
1181 
1182 	if (status == IXGBE_SUCCESS) {
1183 		for (i = 0; i < words; i++) {
1184 			ixgbe_standby_eeprom(hw);
1185 			/*
1186 			 * Some SPI eeproms use the 8th address bit embedded
1187 			 * in the opcode
1188 			 */
1189 			if ((hw->eeprom.address_bits == 8) &&
1190 			    ((offset + i) >= 128))
1191 				read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
1192 
1193 			/* Send the READ command (opcode + addr) */
1194 			ixgbe_shift_out_eeprom_bits(hw, read_opcode,
1195 						    IXGBE_EEPROM_OPCODE_BITS);
1196 			ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
1197 						    hw->eeprom.address_bits);
1198 
1199 			/* Read the data. */
1200 			word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
1201 			data[i] = (word_in >> 8) | (word_in << 8);
1202 		}
1203 
1204 		/* End this read operation */
1205 		ixgbe_release_eeprom(hw);
1206 	}
1207 
1208 	return status;
1209 }
1210 
1211 /**
1212  *  ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
1213  *  @hw: pointer to hardware structure
1214  *  @offset: offset within the EEPROM to be read
1215  *  @data: read 16 bit value from EEPROM
1216  *
1217  *  Reads 16 bit value from EEPROM through bit-bang method
1218  **/
1219 s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1220 				       u16 *data)
1221 {
1222 	s32 status;
1223 
1224 	DEBUGFUNC("ixgbe_read_eeprom_bit_bang_generic");
1225 
1226 	hw->eeprom.ops.init_params(hw);
1227 
1228 	if (offset >= hw->eeprom.word_size) {
1229 		status = IXGBE_ERR_EEPROM;
1230 		goto out;
1231 	}
1232 
1233 	status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1234 
1235 out:
1236 	return status;
1237 }
1238 
1239 /**
1240  *  ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD
1241  *  @hw: pointer to hardware structure
1242  *  @offset: offset of word in the EEPROM to read
1243  *  @words: number of word(s)
1244  *  @data: 16 bit word(s) from the EEPROM
1245  *
1246  *  Reads a 16 bit word(s) from the EEPROM using the EERD register.
1247  **/
1248 s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1249 				   u16 words, u16 *data)
1250 {
1251 	u32 eerd;
1252 	s32 status = IXGBE_SUCCESS;
1253 	u32 i;
1254 
1255 	DEBUGFUNC("ixgbe_read_eerd_buffer_generic");
1256 
1257 	hw->eeprom.ops.init_params(hw);
1258 
1259 	if (words == 0) {
1260 		status = IXGBE_ERR_INVALID_ARGUMENT;
1261 		goto out;
1262 	}
1263 
1264 	if (offset >= hw->eeprom.word_size) {
1265 		status = IXGBE_ERR_EEPROM;
1266 		goto out;
1267 	}
1268 
1269 	for (i = 0; i < words; i++) {
1270 		eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) +
1271 		       IXGBE_EEPROM_RW_REG_START;
1272 
1273 		IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
1274 		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);
1275 
1276 		if (status == IXGBE_SUCCESS) {
1277 			data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
1278 				   IXGBE_EEPROM_RW_REG_DATA);
1279 		} else {
1280 			DEBUGOUT("Eeprom read timed out\n");
1281 			goto out;
1282 		}
1283 	}
1284 out:
1285 	return status;
1286 }
1287 
1288 /**
1289  *  ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size
1290  *  @hw: pointer to hardware structure
1291  *  @offset: offset within the EEPROM to be used as a scratch pad
1292  *
1293  *  Discover EEPROM page size by writing marching data at given offset.
1294  *  This function is called only when we are writing a new large buffer
1295  *  at given offset so the data would be overwritten anyway.
1296  **/
1297 static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
1298 						 u16 offset)
1299 {
1300 	u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
1301 	s32 status = IXGBE_SUCCESS;
1302 	u16 i;
1303 
1304 	DEBUGFUNC("ixgbe_detect_eeprom_page_size_generic");
1305 
1306 	for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
1307 		data[i] = i;
1308 
1309 	hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX;
1310 	status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset,
1311 					     IXGBE_EEPROM_PAGE_SIZE_MAX, data);
1312 	hw->eeprom.word_page_size = 0;
1313 	if (status != IXGBE_SUCCESS)
1314 		goto out;
1315 
1316 	status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1317 	if (status != IXGBE_SUCCESS)
1318 		goto out;
1319 
1320 	/*
1321 	 * When writing in burst more than the actual page size
1322 	 * EEPROM address wraps around current page.
1323 	 */
1324 	hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];
1325 
1326 	DEBUGOUT1("Detected EEPROM page size = %d words.",
1327 		  hw->eeprom.word_page_size);
1328 out:
1329 	return status;
1330 }
1331 
1332 /**
1333  *  ixgbe_read_eerd_generic - Read EEPROM word using EERD
1334  *  @hw: pointer to hardware structure
1335  *  @offset: offset of  word in the EEPROM to read
1336  *  @data: word read from the EEPROM
1337  *
1338  *  Reads a 16 bit word from the EEPROM using the EERD register.
1339  **/
1340 s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
1341 {
1342 	return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
1343 }
1344 
1345 /**
1346  *  ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR
1347  *  @hw: pointer to hardware structure
1348  *  @offset: offset of  word in the EEPROM to write
1349  *  @words: number of word(s)
1350  *  @data: word(s) write to the EEPROM
1351  *
1352  *  Write a 16 bit word(s) to the EEPROM using the EEWR register.
1353  **/
1354 s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1355 				    u16 words, u16 *data)
1356 {
1357 	u32 eewr;
1358 	s32 status = IXGBE_SUCCESS;
1359 	u16 i;
1360 
1361 	DEBUGFUNC("ixgbe_write_eewr_generic");
1362 
1363 	hw->eeprom.ops.init_params(hw);
1364 
1365 	if (words == 0) {
1366 		status = IXGBE_ERR_INVALID_ARGUMENT;
1367 		goto out;
1368 	}
1369 
1370 	if (offset >= hw->eeprom.word_size) {
1371 		status = IXGBE_ERR_EEPROM;
1372 		goto out;
1373 	}
1374 
1375 	for (i = 0; i < words; i++) {
1376 		eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1377 			(data[i] << IXGBE_EEPROM_RW_REG_DATA) |
1378 			IXGBE_EEPROM_RW_REG_START;
1379 
1380 		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1381 		if (status != IXGBE_SUCCESS) {
1382 			DEBUGOUT("Eeprom write EEWR timed out\n");
1383 			goto out;
1384 		}
1385 
1386 		IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
1387 
1388 		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1389 		if (status != IXGBE_SUCCESS) {
1390 			DEBUGOUT("Eeprom write EEWR timed out\n");
1391 			goto out;
1392 		}
1393 	}
1394 
1395 out:
1396 	return status;
1397 }
1398 
1399 /**
1400  *  ixgbe_write_eewr_generic - Write EEPROM word using EEWR
1401  *  @hw: pointer to hardware structure
1402  *  @offset: offset of  word in the EEPROM to write
1403  *  @data: word write to the EEPROM
1404  *
1405  *  Write a 16 bit word to the EEPROM using the EEWR register.
1406  **/
1407 s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
1408 {
1409 	return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data);
1410 }
1411 
1412 /**
1413  *  ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
1414  *  @hw: pointer to hardware structure
1415  *  @ee_reg: EEPROM flag for polling
1416  *
1417  *  Polls the status bit (bit 1) of the EERD or EEWR to determine when the
1418  *  read or write is done respectively.
1419  **/
1420 s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
1421 {
1422 	u32 i;
1423 	u32 reg;
1424 	s32 status = IXGBE_ERR_EEPROM;
1425 
1426 	DEBUGFUNC("ixgbe_poll_eerd_eewr_done");
1427 
1428 	for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
1429 		if (ee_reg == IXGBE_NVM_POLL_READ)
1430 			reg = IXGBE_READ_REG(hw, IXGBE_EERD);
1431 		else
1432 			reg = IXGBE_READ_REG(hw, IXGBE_EEWR);
1433 
1434 		if (reg & IXGBE_EEPROM_RW_REG_DONE) {
1435 			status = IXGBE_SUCCESS;
1436 			break;
1437 		}
1438 		usec_delay(5);
1439 	}
1440 	return status;
1441 }
1442 
1443 /**
1444  *  ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
1445  *  @hw: pointer to hardware structure
1446  *
1447  *  Prepares EEPROM for access using bit-bang method. This function should
1448  *  be called before issuing a command to the EEPROM.
1449  **/
1450 static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
1451 {
1452 	s32 status = IXGBE_SUCCESS;
1453 	u32 eec;
1454 	u32 i;
1455 
1456 	DEBUGFUNC("ixgbe_acquire_eeprom");
1457 
1458 	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)
1459 	    != IXGBE_SUCCESS)
1460 		status = IXGBE_ERR_SWFW_SYNC;
1461 
1462 	if (status == IXGBE_SUCCESS) {
1463 		eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1464 
1465 		/* Request EEPROM Access */
1466 		eec |= IXGBE_EEC_REQ;
1467 		IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1468 
1469 		for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
1470 			eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1471 			if (eec & IXGBE_EEC_GNT)
1472 				break;
1473 			usec_delay(5);
1474 		}
1475 
1476 		/* Release if grant not acquired */
1477 		if (!(eec & IXGBE_EEC_GNT)) {
1478 			eec &= ~IXGBE_EEC_REQ;
1479 			IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1480 			DEBUGOUT("Could not acquire EEPROM grant\n");
1481 
1482 			hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1483 			status = IXGBE_ERR_EEPROM;
1484 		}
1485 
1486 		/* Setup EEPROM for Read/Write */
1487 		if (status == IXGBE_SUCCESS) {
1488 			/* Clear CS and SK */
1489 			eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
1490 			IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1491 			IXGBE_WRITE_FLUSH(hw);
1492 			usec_delay(1);
1493 		}
1494 	}
1495 	return status;
1496 }
1497 
1498 /**
1499  *  ixgbe_get_eeprom_semaphore - Get hardware semaphore
1500  *  @hw: pointer to hardware structure
1501  *
1502  *  Sets the hardware semaphores so EEPROM access can occur for bit-bang method
1503  **/
1504 static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
1505 {
1506 	s32 status = IXGBE_ERR_EEPROM;
1507 	u32 timeout = 2000;
1508 	u32 i;
1509 	u32 swsm;
1510 
1511 	DEBUGFUNC("ixgbe_get_eeprom_semaphore");
1512 
1513 
1514 	/* Get SMBI software semaphore between device drivers first */
1515 	for (i = 0; i < timeout; i++) {
1516 		/*
1517 		 * If the SMBI bit is 0 when we read it, then the bit will be
1518 		 * set and we have the semaphore
1519 		 */
1520 		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1521 		if (!(swsm & IXGBE_SWSM_SMBI)) {
1522 			status = IXGBE_SUCCESS;
1523 			break;
1524 		}
1525 		usec_delay(50);
1526 	}
1527 
1528 	if (i == timeout) {
1529 		DEBUGOUT("Driver can't access the Eeprom - SMBI Semaphore "
1530 			 "not granted.\n");
1531 		/*
1532 		 * this release is particularly important because our attempts
1533 		 * above to get the semaphore may have succeeded, and if there
1534 		 * was a timeout, we should unconditionally clear the semaphore
1535 		 * bits to free the driver to make progress
1536 		 */
1537 		ixgbe_release_eeprom_semaphore(hw);
1538 
1539 		usec_delay(50);
1540 		/*
1541 		 * one last try
1542 		 * If the SMBI bit is 0 when we read it, then the bit will be
1543 		 * set and we have the semaphore
1544 		 */
1545 		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1546 		if (!(swsm & IXGBE_SWSM_SMBI))
1547 			status = IXGBE_SUCCESS;
1548 	}
1549 
1550 	/* Now get the semaphore between SW/FW through the SWESMBI bit */
1551 	if (status == IXGBE_SUCCESS) {
1552 		for (i = 0; i < timeout; i++) {
1553 			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1554 
1555 			/* Set the SW EEPROM semaphore bit to request access */
1556 			swsm |= IXGBE_SWSM_SWESMBI;
1557 			IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
1558 
1559 			/*
1560 			 * If we set the bit successfully then we got the
1561 			 * semaphore.
1562 			 */
1563 			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1564 			if (swsm & IXGBE_SWSM_SWESMBI)
1565 				break;
1566 
1567 			usec_delay(50);
1568 		}
1569 
1570 		/*
1571 		 * Release semaphores and return error if SW EEPROM semaphore
1572 		 * was not granted because we don't have access to the EEPROM
1573 		 */
1574 		if (i >= timeout) {
1575 			DEBUGOUT("SWESMBI Software EEPROM semaphore "
1576 				 "not granted.\n");
1577 			ixgbe_release_eeprom_semaphore(hw);
1578 			status = IXGBE_ERR_EEPROM;
1579 		}
1580 	} else {
1581 		DEBUGOUT("Software semaphore SMBI between device drivers "
1582 			 "not granted.\n");
1583 	}
1584 
1585 	return status;
1586 }
1587 
1588 /**
1589  *  ixgbe_release_eeprom_semaphore - Release hardware semaphore
1590  *  @hw: pointer to hardware structure
1591  *
1592  *  This function clears hardware semaphore bits.
1593  **/
1594 static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
1595 {
1596 	u32 swsm;
1597 
1598 	DEBUGFUNC("ixgbe_release_eeprom_semaphore");
1599 
1600 	swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1601 
1602 	/* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
1603 	swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
1604 	IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
1605 	IXGBE_WRITE_FLUSH(hw);
1606 }
1607 
1608 /**
1609  *  ixgbe_ready_eeprom - Polls for EEPROM ready
1610  *  @hw: pointer to hardware structure
1611  **/
1612 static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
1613 {
1614 	s32 status = IXGBE_SUCCESS;
1615 	u16 i;
1616 	u8 spi_stat_reg;
1617 
1618 	DEBUGFUNC("ixgbe_ready_eeprom");
1619 
1620 	/*
1621 	 * Read "Status Register" repeatedly until the LSB is cleared.  The
1622 	 * EEPROM will signal that the command has been completed by clearing
1623 	 * bit 0 of the internal status register.  If it's not cleared within
1624 	 * 5 milliseconds, then error out.
1625 	 */
1626 	for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
1627 		ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
1628 					    IXGBE_EEPROM_OPCODE_BITS);
1629 		spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
1630 		if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
1631 			break;
1632 
1633 		usec_delay(5);
1634 		ixgbe_standby_eeprom(hw);
1635 	};
1636 
1637 	/*
1638 	 * On some parts, SPI write time could vary from 0-20mSec on 3.3V
1639 	 * devices (and only 0-5mSec on 5V devices)
1640 	 */
1641 	if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
1642 		DEBUGOUT("SPI EEPROM Status error\n");
1643 		status = IXGBE_ERR_EEPROM;
1644 	}
1645 
1646 	return status;
1647 }
1648 
1649 /**
1650  *  ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
1651  *  @hw: pointer to hardware structure
1652  **/
1653 static void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
1654 {
1655 	u32 eec;
1656 
1657 	DEBUGFUNC("ixgbe_standby_eeprom");
1658 
1659 	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1660 
1661 	/* Toggle CS to flush commands */
1662 	eec |= IXGBE_EEC_CS;
1663 	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1664 	IXGBE_WRITE_FLUSH(hw);
1665 	usec_delay(1);
1666 	eec &= ~IXGBE_EEC_CS;
1667 	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1668 	IXGBE_WRITE_FLUSH(hw);
1669 	usec_delay(1);
1670 }
1671 
1672 /**
1673  *  ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
1674  *  @hw: pointer to hardware structure
1675  *  @data: data to send to the EEPROM
1676  *  @count: number of bits to shift out
1677  **/
1678 static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
1679 					u16 count)
1680 {
1681 	u32 eec;
1682 	u32 mask;
1683 	u32 i;
1684 
1685 	DEBUGFUNC("ixgbe_shift_out_eeprom_bits");
1686 
1687 	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1688 
1689 	/*
1690 	 * Mask is used to shift "count" bits of "data" out to the EEPROM
1691 	 * one bit at a time.  Determine the starting bit based on count
1692 	 */
1693 	mask = 0x01 << (count - 1);
1694 
1695 	for (i = 0; i < count; i++) {
1696 		/*
1697 		 * A "1" is shifted out to the EEPROM by setting bit "DI" to a
1698 		 * "1", and then raising and then lowering the clock (the SK
1699 		 * bit controls the clock input to the EEPROM).  A "0" is
1700 		 * shifted out to the EEPROM by setting "DI" to "0" and then
1701 		 * raising and then lowering the clock.
1702 		 */
1703 		if (data & mask)
1704 			eec |= IXGBE_EEC_DI;
1705 		else
1706 			eec &= ~IXGBE_EEC_DI;
1707 
1708 		IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1709 		IXGBE_WRITE_FLUSH(hw);
1710 
1711 		usec_delay(1);
1712 
1713 		ixgbe_raise_eeprom_clk(hw, &eec);
1714 		ixgbe_lower_eeprom_clk(hw, &eec);
1715 
1716 		/*
1717 		 * Shift mask to signify next bit of data to shift in to the
1718 		 * EEPROM
1719 		 */
1720 		mask = mask >> 1;
1721 	};
1722 
1723 	/* We leave the "DI" bit set to "0" when we leave this routine. */
1724 	eec &= ~IXGBE_EEC_DI;
1725 	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1726 	IXGBE_WRITE_FLUSH(hw);
1727 }
1728 
1729 /**
1730  *  ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
1731  *  @hw: pointer to hardware structure
1732  **/
1733 static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
1734 {
1735 	u32 eec;
1736 	u32 i;
1737 	u16 data = 0;
1738 
1739 	DEBUGFUNC("ixgbe_shift_in_eeprom_bits");
1740 
1741 	/*
1742 	 * In order to read a register from the EEPROM, we need to shift
1743 	 * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
1744 	 * the clock input to the EEPROM (setting the SK bit), and then reading
1745 	 * the value of the "DO" bit.  During this "shifting in" process the
1746 	 * "DI" bit should always be clear.
1747 	 */
1748 	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1749 
1750 	eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);
1751 
1752 	for (i = 0; i < count; i++) {
1753 		data = data << 1;
1754 		ixgbe_raise_eeprom_clk(hw, &eec);
1755 
1756 		eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1757 
1758 		eec &= ~(IXGBE_EEC_DI);
1759 		if (eec & IXGBE_EEC_DO)
1760 			data |= 1;
1761 
1762 		ixgbe_lower_eeprom_clk(hw, &eec);
1763 	}
1764 
1765 	return data;
1766 }
1767 
1768 /**
1769  *  ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
1770  *  @hw: pointer to hardware structure
1771  *  @eec: EEC register's current value
1772  **/
1773 static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
1774 {
1775 	DEBUGFUNC("ixgbe_raise_eeprom_clk");
1776 
1777 	/*
1778 	 * Raise the clock input to the EEPROM
1779 	 * (setting the SK bit), then delay
1780 	 */
1781 	*eec = *eec | IXGBE_EEC_SK;
1782 	IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
1783 	IXGBE_WRITE_FLUSH(hw);
1784 	usec_delay(1);
1785 }
1786 
1787 /**
1788  *  ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
1789  *  @hw: pointer to hardware structure
1790  *  @eecd: EECD's current value
1791  **/
1792 static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
1793 {
1794 	DEBUGFUNC("ixgbe_lower_eeprom_clk");
1795 
1796 	/*
1797 	 * Lower the clock input to the EEPROM (clearing the SK bit), then
1798 	 * delay
1799 	 */
1800 	*eec = *eec & ~IXGBE_EEC_SK;
1801 	IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
1802 	IXGBE_WRITE_FLUSH(hw);
1803 	usec_delay(1);
1804 }
1805 
1806 /**
1807  *  ixgbe_release_eeprom - Release EEPROM, release semaphores
1808  *  @hw: pointer to hardware structure
1809  **/
1810 static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
1811 {
1812 	u32 eec;
1813 
1814 	DEBUGFUNC("ixgbe_release_eeprom");
1815 
1816 	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1817 
1818 	eec |= IXGBE_EEC_CS;  /* Pull CS high */
1819 	eec &= ~IXGBE_EEC_SK; /* Lower SCK */
1820 
1821 	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1822 	IXGBE_WRITE_FLUSH(hw);
1823 
1824 	usec_delay(1);
1825 
1826 	/* Stop requesting EEPROM access */
1827 	eec &= ~IXGBE_EEC_REQ;
1828 	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1829 
1830 	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1831 
1832 	/* Delay before attempt to obtain semaphore again to allow FW access */
1833 	msec_delay(hw->eeprom.semaphore_delay);
1834 }
1835 
1836 /**
1837  *  ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
1838  *  @hw: pointer to hardware structure
1839  **/
1840 u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
1841 {
1842 	u16 i;
1843 	u16 j;
1844 	u16 checksum = 0;
1845 	u16 length = 0;
1846 	u16 pointer = 0;
1847 	u16 word = 0;
1848 
1849 	DEBUGFUNC("ixgbe_calc_eeprom_checksum_generic");
1850 
1851 	/* Include 0x0-0x3F in the checksum */
1852 	for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
1853 		if (hw->eeprom.ops.read(hw, i, &word) != IXGBE_SUCCESS) {
1854 			DEBUGOUT("EEPROM read failed\n");
1855 			break;
1856 		}
1857 		checksum += word;
1858 	}
1859 
1860 	/* Include all data from pointers except for the fw pointer */
1861 	for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
1862 		hw->eeprom.ops.read(hw, i, &pointer);
1863 
1864 		/* Make sure the pointer seems valid */
1865 		if (pointer != 0xFFFF && pointer != 0) {
1866 			hw->eeprom.ops.read(hw, pointer, &length);
1867 
1868 			if (length != 0xFFFF && length != 0) {
1869 				for (j = pointer+1; j <= pointer+length; j++) {
1870 					hw->eeprom.ops.read(hw, j, &word);
1871 					checksum += word;
1872 				}
1873 			}
1874 		}
1875 	}
1876 
1877 	checksum = (u16)IXGBE_EEPROM_SUM - checksum;
1878 
1879 	return checksum;
1880 }
1881 
1882 /**
1883  *  ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
1884  *  @hw: pointer to hardware structure
1885  *  @checksum_val: calculated checksum
1886  *
1887  *  Performs checksum calculation and validates the EEPROM checksum.  If the
1888  *  caller does not need checksum_val, the value can be NULL.
1889  **/
1890 s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
1891 					   u16 *checksum_val)
1892 {
1893 	s32 status;
1894 	u16 checksum;
1895 	u16 read_checksum = 0;
1896 
1897 	DEBUGFUNC("ixgbe_validate_eeprom_checksum_generic");
1898 
1899 	/*
1900 	 * Read the first word from the EEPROM. If this times out or fails, do
1901 	 * not continue or we could be in for a very long wait while every
1902 	 * EEPROM read fails
1903 	 */
1904 	status = hw->eeprom.ops.read(hw, 0, &checksum);
1905 
1906 	if (status == IXGBE_SUCCESS) {
1907 		checksum = hw->eeprom.ops.calc_checksum(hw);
1908 
1909 		hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
1910 
1911 		/*
1912 		 * Verify read checksum from EEPROM is the same as
1913 		 * calculated checksum
1914 		 */
1915 		if (read_checksum != checksum)
1916 			status = IXGBE_ERR_EEPROM_CHECKSUM;
1917 
1918 		/* If the user cares, return the calculated checksum */
1919 		if (checksum_val)
1920 			*checksum_val = checksum;
1921 	} else {
1922 		DEBUGOUT("EEPROM read failed\n");
1923 	}
1924 
1925 	return status;
1926 }
1927 
1928 /**
1929  *  ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
1930  *  @hw: pointer to hardware structure
1931  **/
1932 s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
1933 {
1934 	s32 status;
1935 	u16 checksum;
1936 
1937 	DEBUGFUNC("ixgbe_update_eeprom_checksum_generic");
1938 
1939 	/*
1940 	 * Read the first word from the EEPROM. If this times out or fails, do
1941 	 * not continue or we could be in for a very long wait while every
1942 	 * EEPROM read fails
1943 	 */
1944 	status = hw->eeprom.ops.read(hw, 0, &checksum);
1945 
1946 	if (status == IXGBE_SUCCESS) {
1947 		checksum = hw->eeprom.ops.calc_checksum(hw);
1948 		status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM,
1949 					      checksum);
1950 	} else {
1951 		DEBUGOUT("EEPROM read failed\n");
1952 	}
1953 
1954 	return status;
1955 }
1956 
1957 /**
1958  *  ixgbe_validate_mac_addr - Validate MAC address
1959  *  @mac_addr: pointer to MAC address.
1960  *
1961  *  Tests a MAC address to ensure it is a valid Individual Address
1962  **/
1963 s32 ixgbe_validate_mac_addr(u8 *mac_addr)
1964 {
1965 	s32 status = IXGBE_SUCCESS;
1966 
1967 	DEBUGFUNC("ixgbe_validate_mac_addr");
1968 
1969 	/* Make sure it is not a multicast address */
1970 	if (IXGBE_IS_MULTICAST(mac_addr)) {
1971 		DEBUGOUT("MAC address is multicast\n");
1972 		status = IXGBE_ERR_INVALID_MAC_ADDR;
1973 	/* Not a broadcast address */
1974 	} else if (IXGBE_IS_BROADCAST(mac_addr)) {
1975 		DEBUGOUT("MAC address is broadcast\n");
1976 		status = IXGBE_ERR_INVALID_MAC_ADDR;
1977 	/* Reject the zero address */
1978 	} else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
1979 		   mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
1980 		DEBUGOUT("MAC address is all zeros\n");
1981 		status = IXGBE_ERR_INVALID_MAC_ADDR;
1982 	}
1983 	return status;
1984 }
1985 
1986 /**
1987  *  ixgbe_set_rar_generic - Set Rx address register
1988  *  @hw: pointer to hardware structure
1989  *  @index: Receive address register to write
1990  *  @addr: Address to put into receive address register
1991  *  @vmdq: VMDq "set" or "pool" index
1992  *  @enable_addr: set flag that address is active
1993  *
1994  *  Puts an ethernet address into a receive address register.
1995  **/
1996 s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
1997 			  u32 enable_addr)
1998 {
1999 	u32 rar_low, rar_high;
2000 	u32 rar_entries = hw->mac.num_rar_entries;
2001 
2002 	DEBUGFUNC("ixgbe_set_rar_generic");
2003 
2004 	/* Make sure we are using a valid rar index range */
2005 	if (index >= rar_entries) {
2006 		DEBUGOUT1("RAR index %d is out of range.\n", index);
2007 		return IXGBE_ERR_INVALID_ARGUMENT;
2008 	}
2009 
2010 	/* setup VMDq pool selection before this RAR gets enabled */
2011 	hw->mac.ops.set_vmdq(hw, index, vmdq);
2012 
2013 	/*
2014 	 * HW expects these in little endian so we reverse the byte
2015 	 * order from network order (big endian) to little endian
2016 	 */
2017 	rar_low = ((u32)addr[0] |
2018 		   ((u32)addr[1] << 8) |
2019 		   ((u32)addr[2] << 16) |
2020 		   ((u32)addr[3] << 24));
2021 	/*
2022 	 * Some parts put the VMDq setting in the extra RAH bits,
2023 	 * so save everything except the lower 16 bits that hold part
2024 	 * of the address and the address valid bit.
2025 	 */
2026 	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
2027 	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
2028 	rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
2029 
2030 	if (enable_addr != 0)
2031 		rar_high |= IXGBE_RAH_AV;
2032 
2033 	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
2034 	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
2035 
2036 	return IXGBE_SUCCESS;
2037 }
2038 
2039 /**
2040  *  ixgbe_clear_rar_generic - Remove Rx address register
2041  *  @hw: pointer to hardware structure
2042  *  @index: Receive address register to write
2043  *
2044  *  Clears an ethernet address from a receive address register.
2045  **/
2046 s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
2047 {
2048 	u32 rar_high;
2049 	u32 rar_entries = hw->mac.num_rar_entries;
2050 
2051 	DEBUGFUNC("ixgbe_clear_rar_generic");
2052 
2053 	/* Make sure we are using a valid rar index range */
2054 	if (index >= rar_entries) {
2055 		DEBUGOUT1("RAR index %d is out of range.\n", index);
2056 		return IXGBE_ERR_INVALID_ARGUMENT;
2057 	}
2058 
2059 	/*
2060 	 * Some parts put the VMDq setting in the extra RAH bits,
2061 	 * so save everything except the lower 16 bits that hold part
2062 	 * of the address and the address valid bit.
2063 	 */
2064 	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
2065 	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
2066 
2067 	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
2068 	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
2069 
2070 	/* clear VMDq pool/queue selection for this RAR */
2071 	hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
2072 
2073 	return IXGBE_SUCCESS;
2074 }
2075 
2076 /**
2077  *  ixgbe_init_rx_addrs_generic - Initializes receive address filters.
2078  *  @hw: pointer to hardware structure
2079  *
2080  *  Places the MAC address in receive address register 0 and clears the rest
2081  *  of the receive address registers. Clears the multicast table. Assumes
2082  *  the receiver is in reset when the routine is called.
2083  **/
2084 s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
2085 {
2086 	u32 i;
2087 	u32 rar_entries = hw->mac.num_rar_entries;
2088 
2089 	DEBUGFUNC("ixgbe_init_rx_addrs_generic");
2090 
2091 	/*
2092 	 * If the current mac address is valid, assume it is a software override
2093 	 * to the permanent address.
2094 	 * Otherwise, use the permanent address from the eeprom.
2095 	 */
2096 	if (ixgbe_validate_mac_addr(hw->mac.addr) ==
2097 	    IXGBE_ERR_INVALID_MAC_ADDR) {
2098 		/* Get the MAC address from the RAR0 for later reference */
2099 		hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
2100 
2101 		DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
2102 			  hw->mac.addr[0], hw->mac.addr[1],
2103 			  hw->mac.addr[2]);
2104 		DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
2105 			  hw->mac.addr[4], hw->mac.addr[5]);
2106 	} else {
2107 		/* Setup the receive address. */
2108 		DEBUGOUT("Overriding MAC Address in RAR[0]\n");
2109 		DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ",
2110 			  hw->mac.addr[0], hw->mac.addr[1],
2111 			  hw->mac.addr[2]);
2112 		DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
2113 			  hw->mac.addr[4], hw->mac.addr[5]);
2114 
2115 		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
2116 
2117 		/* clear VMDq pool/queue selection for RAR 0 */
2118 		hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
2119 	}
2120 	hw->addr_ctrl.overflow_promisc = 0;
2121 
2122 	hw->addr_ctrl.rar_used_count = 1;
2123 
2124 	/* Zero out the other receive addresses. */
2125 	DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1);
2126 	for (i = 1; i < rar_entries; i++) {
2127 		IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
2128 		IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
2129 	}
2130 
2131 	/* Clear the MTA */
2132 	hw->addr_ctrl.mta_in_use = 0;
2133 	IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
2134 
2135 	DEBUGOUT(" Clearing MTA\n");
2136 	for (i = 0; i < hw->mac.mcft_size; i++)
2137 		IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
2138 
2139 	ixgbe_init_uta_tables(hw);
2140 
2141 	return IXGBE_SUCCESS;
2142 }
2143 
2144 /**
2145  *  ixgbe_add_uc_addr - Adds a secondary unicast address.
2146  *  @hw: pointer to hardware structure
2147  *  @addr: new address
2148  *
2149  *  Adds it to unused receive address register or goes into promiscuous mode.
2150  **/
2151 void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
2152 {
2153 	u32 rar_entries = hw->mac.num_rar_entries;
2154 	u32 rar;
2155 
2156 	DEBUGFUNC("ixgbe_add_uc_addr");
2157 
2158 	DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
2159 		  addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
2160 
2161 	/*
2162 	 * Place this address in the RAR if there is room,
2163 	 * else put the controller into promiscuous mode
2164 	 */
2165 	if (hw->addr_ctrl.rar_used_count < rar_entries) {
2166 		rar = hw->addr_ctrl.rar_used_count;
2167 		hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
2168 		DEBUGOUT1("Added a secondary address to RAR[%d]\n", rar);
2169 		hw->addr_ctrl.rar_used_count++;
2170 	} else {
2171 		hw->addr_ctrl.overflow_promisc++;
2172 	}
2173 
2174 	DEBUGOUT("ixgbe_add_uc_addr Complete\n");
2175 }
2176 
2177 /**
2178  *  ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
2179  *  @hw: pointer to hardware structure
2180  *  @addr_list: the list of new addresses
2181  *  @addr_count: number of addresses
2182  *  @next: iterator function to walk the address list
2183  *
2184  *  The given list replaces any existing list.  Clears the secondary addrs from
2185  *  receive address registers.  Uses unused receive address registers for the
2186  *  first secondary addresses, and falls back to promiscuous mode as needed.
2187  *
2188  *  Drivers using secondary unicast addresses must set user_set_promisc when
2189  *  manually putting the device into promiscuous mode.
2190  **/
2191 s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
2192 				      u32 addr_count, ixgbe_mc_addr_itr next)
2193 {
2194 	u8 *addr;
2195 	u32 i;
2196 	u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
2197 	u32 uc_addr_in_use;
2198 	u32 fctrl;
2199 	u32 vmdq;
2200 
2201 	DEBUGFUNC("ixgbe_update_uc_addr_list_generic");
2202 
2203 	/*
2204 	 * Clear accounting of old secondary address list,
2205 	 * don't count RAR[0]
2206 	 */
2207 	uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
2208 	hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
2209 	hw->addr_ctrl.overflow_promisc = 0;
2210 
2211 	/* Zero out the other receive addresses */
2212 	DEBUGOUT1("Clearing RAR[1-%d]\n", uc_addr_in_use+1);
2213 	for (i = 0; i < uc_addr_in_use; i++) {
2214 		IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0);
2215 		IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0);
2216 	}
2217 
2218 	/* Add the new addresses */
2219 	for (i = 0; i < addr_count; i++) {
2220 		DEBUGOUT(" Adding the secondary addresses:\n");
2221 		addr = next(hw, &addr_list, &vmdq);
2222 		ixgbe_add_uc_addr(hw, addr, vmdq);
2223 	}
2224 
2225 	if (hw->addr_ctrl.overflow_promisc) {
2226 		/* enable promisc if not already in overflow or set by user */
2227 		if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
2228 			DEBUGOUT(" Entering address overflow promisc mode\n");
2229 			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2230 			fctrl |= IXGBE_FCTRL_UPE;
2231 			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2232 		}
2233 	} else {
2234 		/* only disable if set by overflow, not by user */
2235 		if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
2236 			DEBUGOUT(" Leaving address overflow promisc mode\n");
2237 			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2238 			fctrl &= ~IXGBE_FCTRL_UPE;
2239 			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2240 		}
2241 	}
2242 
2243 	DEBUGOUT("ixgbe_update_uc_addr_list_generic Complete\n");
2244 	return IXGBE_SUCCESS;
2245 }
2246 
2247 /**
2248  *  ixgbe_mta_vector - Determines bit-vector in multicast table to set
2249  *  @hw: pointer to hardware structure
2250  *  @mc_addr: the multicast address
2251  *
2252  *  Extracts the 12 bits, from a multicast address, to determine which
2253  *  bit-vector to set in the multicast table. The hardware uses 12 bits, from
2254  *  incoming rx multicast addresses, to determine the bit-vector to check in
2255  *  the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
2256  *  by the MO field of the MCSTCTRL. The MO field is set during initialization
2257  *  to mc_filter_type.
2258  **/
2259 static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
2260 {
2261 	u32 vector = 0;
2262 
2263 	DEBUGFUNC("ixgbe_mta_vector");
2264 
2265 	switch (hw->mac.mc_filter_type) {
2266 	case 0:   /* use bits [47:36] of the address */
2267 		vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
2268 		break;
2269 	case 1:   /* use bits [46:35] of the address */
2270 		vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
2271 		break;
2272 	case 2:   /* use bits [45:34] of the address */
2273 		vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
2274 		break;
2275 	case 3:   /* use bits [43:32] of the address */
2276 		vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
2277 		break;
2278 	default:  /* Invalid mc_filter_type */
2279 		DEBUGOUT("MC filter type param set incorrectly\n");
2280 		ASSERT(0);
2281 		break;
2282 	}
2283 
2284 	/* vector can only be 12-bits or boundary will be exceeded */
2285 	vector &= 0xFFF;
2286 	return vector;
2287 }
2288 
2289 /**
2290  *  ixgbe_set_mta - Set bit-vector in multicast table
2291  *  @hw: pointer to hardware structure
2292  *  @hash_value: Multicast address hash value
2293  *
2294  *  Sets the bit-vector in the multicast table.
2295  **/
2296 void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
2297 {
2298 	u32 vector;
2299 	u32 vector_bit;
2300 	u32 vector_reg;
2301 
2302 	DEBUGFUNC("ixgbe_set_mta");
2303 
2304 	hw->addr_ctrl.mta_in_use++;
2305 
2306 	vector = ixgbe_mta_vector(hw, mc_addr);
2307 	DEBUGOUT1(" bit-vector = 0x%03X\n", vector);
2308 
2309 	/*
2310 	 * The MTA is a register array of 128 32-bit registers. It is treated
2311 	 * like an array of 4096 bits.  We want to set bit
2312 	 * BitArray[vector_value]. So we figure out what register the bit is
2313 	 * in, read it, OR in the new bit, then write back the new value.  The
2314 	 * register is determined by the upper 7 bits of the vector value and
2315 	 * the bit within that register are determined by the lower 5 bits of
2316 	 * the value.
2317 	 */
2318 	vector_reg = (vector >> 5) & 0x7F;
2319 	vector_bit = vector & 0x1F;
2320 	hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
2321 }
2322 
2323 /**
2324  *  ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
2325  *  @hw: pointer to hardware structure
2326  *  @mc_addr_list: the list of new multicast addresses
2327  *  @mc_addr_count: number of addresses
2328  *  @next: iterator function to walk the multicast address list
2329  *  @clear: flag, when set clears the table beforehand
2330  *
2331  *  When the clear flag is set, the given list replaces any existing list.
2332  *  Hashes the given addresses into the multicast table.
2333  **/
2334 s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
2335 				      u32 mc_addr_count, ixgbe_mc_addr_itr next,
2336 				      bool clear)
2337 {
2338 	u32 i;
2339 	u32 vmdq;
2340 
2341 	DEBUGFUNC("ixgbe_update_mc_addr_list_generic");
2342 
2343 	/*
2344 	 * Set the new number of MC addresses that we are being requested to
2345 	 * use.
2346 	 */
2347 	hw->addr_ctrl.num_mc_addrs = mc_addr_count;
2348 	hw->addr_ctrl.mta_in_use = 0;
2349 
2350 	/* Clear mta_shadow */
2351 	if (clear) {
2352 		DEBUGOUT(" Clearing MTA\n");
2353 		memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
2354 	}
2355 
2356 	/* Update mta_shadow */
2357 	for (i = 0; i < mc_addr_count; i++) {
2358 		DEBUGOUT(" Adding the multicast addresses:\n");
2359 		ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
2360 	}
2361 
2362 	/* Enable mta */
2363 	for (i = 0; i < hw->mac.mcft_size; i++)
2364 		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
2365 				      hw->mac.mta_shadow[i]);
2366 
2367 	if (hw->addr_ctrl.mta_in_use > 0)
2368 		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
2369 				IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
2370 
2371 	DEBUGOUT("ixgbe_update_mc_addr_list_generic Complete\n");
2372 	return IXGBE_SUCCESS;
2373 }
2374 
2375 /**
2376  *  ixgbe_enable_mc_generic - Enable multicast address in RAR
2377  *  @hw: pointer to hardware structure
2378  *
2379  *  Enables multicast address in RAR and the use of the multicast hash table.
2380  **/
2381 s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
2382 {
2383 	struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2384 
2385 	DEBUGFUNC("ixgbe_enable_mc_generic");
2386 
2387 	if (a->mta_in_use > 0)
2388 		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
2389 				hw->mac.mc_filter_type);
2390 
2391 	return IXGBE_SUCCESS;
2392 }
2393 
2394 /**
2395  *  ixgbe_disable_mc_generic - Disable multicast address in RAR
2396  *  @hw: pointer to hardware structure
2397  *
2398  *  Disables multicast address in RAR and the use of the multicast hash table.
2399  **/
2400 s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
2401 {
2402 	struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2403 
2404 	DEBUGFUNC("ixgbe_disable_mc_generic");
2405 
2406 	if (a->mta_in_use > 0)
2407 		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
2408 
2409 	return IXGBE_SUCCESS;
2410 }
2411 
2412 /**
2413  *  ixgbe_fc_enable_generic - Enable flow control
2414  *  @hw: pointer to hardware structure
2415  *
2416  *  Enable flow control according to the current settings.
2417  **/
2418 s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
2419 {
2420 	s32 ret_val = IXGBE_SUCCESS;
2421 	u32 mflcn_reg, fccfg_reg;
2422 	u32 reg;
2423 	u32 fcrtl, fcrth;
2424 	int i;
2425 
2426 	DEBUGFUNC("ixgbe_fc_enable_generic");
2427 
2428 	/* Validate the water mark configuration */
2429 	if (!hw->fc.pause_time) {
2430 		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2431 		goto out;
2432 	}
2433 
2434 	/* Low water mark of zero causes XOFF floods */
2435 	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
2436 		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
2437 		    hw->fc.high_water[i]) {
2438 			if (!hw->fc.low_water[i] ||
2439 			    hw->fc.low_water[i] >= hw->fc.high_water[i]) {
2440 				DEBUGOUT("Invalid water mark configuration\n");
2441 				ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2442 				goto out;
2443 			}
2444 		}
2445 	}
2446 
2447 	/* Negotiate the fc mode to use */
2448 	ixgbe_fc_autoneg(hw);
2449 
2450 	/* Disable any previous flow control settings */
2451 	mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
2452 	mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);
2453 
2454 	fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
2455 	fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
2456 
2457 	/*
2458 	 * The possible values of fc.current_mode are:
2459 	 * 0: Flow control is completely disabled
2460 	 * 1: Rx flow control is enabled (we can receive pause frames,
2461 	 *    but not send pause frames).
2462 	 * 2: Tx flow control is enabled (we can send pause frames but
2463 	 *    we do not support receiving pause frames).
2464 	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
2465 	 * other: Invalid.
2466 	 */
2467 	switch (hw->fc.current_mode) {
2468 	case ixgbe_fc_none:
2469 		/*
2470 		 * Flow control is disabled by software override or autoneg.
2471 		 * The code below will actually disable it in the HW.
2472 		 */
2473 		break;
2474 	case ixgbe_fc_rx_pause:
2475 		/*
2476 		 * Rx Flow control is enabled and Tx Flow control is
2477 		 * disabled by software override. Since there really
2478 		 * isn't a way to advertise that we are capable of RX
2479 		 * Pause ONLY, we will advertise that we support both
2480 		 * symmetric and asymmetric Rx PAUSE.  Later, we will
2481 		 * disable the adapter's ability to send PAUSE frames.
2482 		 */
2483 		mflcn_reg |= IXGBE_MFLCN_RFCE;
2484 		break;
2485 	case ixgbe_fc_tx_pause:
2486 		/*
2487 		 * Tx Flow control is enabled, and Rx Flow control is
2488 		 * disabled by software override.
2489 		 */
2490 		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
2491 		break;
2492 	case ixgbe_fc_full:
2493 		/* Flow control (both Rx and Tx) is enabled by SW override. */
2494 		mflcn_reg |= IXGBE_MFLCN_RFCE;
2495 		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
2496 		break;
2497 	default:
2498 		DEBUGOUT("Flow control param set incorrectly\n");
2499 		ret_val = IXGBE_ERR_CONFIG;
2500 		goto out;
2501 	}
2502 
2503 	/* Set 802.3x based flow control settings. */
2504 	mflcn_reg |= IXGBE_MFLCN_DPF;
2505 	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
2506 	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
2507 
2508 
2509 	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
2510 	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
2511 		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
2512 		    hw->fc.high_water[i]) {
2513 			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
2514 			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
2515 			fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
2516 		} else {
2517 			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
2518 			/*
2519 			 * In order to prevent Tx hangs when the internal Tx
2520 			 * switch is enabled we must set the high water mark
2521 			 * to the maximum FCRTH value.  This allows the Tx
2522 			 * switch to function even under heavy Rx workloads.
2523 			 */
2524 			fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
2525 		}
2526 
2527 		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
2528 	}
2529 
2530 	/* Configure pause time (2 TCs per register) */
2531 	reg = hw->fc.pause_time * 0x00010001;
2532 	for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
2533 		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
2534 
2535 	/* Configure flow control refresh threshold value */
2536 	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
2537 
2538 out:
2539 	return ret_val;
2540 }
2541 
2542 /**
2543  *  ixgbe_negotiate_fc - Negotiate flow control
2544  *  @hw: pointer to hardware structure
2545  *  @adv_reg: flow control advertised settings
2546  *  @lp_reg: link partner's flow control settings
2547  *  @adv_sym: symmetric pause bit in advertisement
2548  *  @adv_asm: asymmetric pause bit in advertisement
2549  *  @lp_sym: symmetric pause bit in link partner advertisement
2550  *  @lp_asm: asymmetric pause bit in link partner advertisement
2551  *
2552  *  Find the intersection between advertised settings and link partner's
2553  *  advertised settings
2554  **/
2555 static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
2556 			      u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
2557 {
2558 	if ((!(adv_reg)) ||  (!(lp_reg)))
2559 		return IXGBE_ERR_FC_NOT_NEGOTIATED;
2560 
2561 	if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
2562 		/*
2563 		 * Now we need to check if the user selected Rx ONLY
2564 		 * of pause frames.  In this case, we had to advertise
2565 		 * FULL flow control because we could not advertise RX
2566 		 * ONLY. Hence, we must now check to see if we need to
2567 		 * turn OFF the TRANSMISSION of PAUSE frames.
2568 		 */
2569 		if (hw->fc.requested_mode == ixgbe_fc_full) {
2570 			hw->fc.current_mode = ixgbe_fc_full;
2571 			DEBUGOUT("Flow Control = FULL.\n");
2572 		} else {
2573 			hw->fc.current_mode = ixgbe_fc_rx_pause;
2574 			DEBUGOUT("Flow Control=RX PAUSE frames only\n");
2575 		}
2576 	} else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2577 		   (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2578 		hw->fc.current_mode = ixgbe_fc_tx_pause;
2579 		DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
2580 	} else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2581 		   !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2582 		hw->fc.current_mode = ixgbe_fc_rx_pause;
2583 		DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
2584 	} else {
2585 		hw->fc.current_mode = ixgbe_fc_none;
2586 		DEBUGOUT("Flow Control = NONE.\n");
2587 	}
2588 	return IXGBE_SUCCESS;
2589 }
2590 
2591 /**
2592  *  ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
2593  *  @hw: pointer to hardware structure
2594  *
2595  *  Enable flow control according on 1 gig fiber.
2596  **/
2597 static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
2598 {
2599 	u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
2600 	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2601 
2602 	/*
2603 	 * On multispeed fiber at 1g, bail out if
2604 	 * - link is up but AN did not complete, or if
2605 	 * - link is up and AN completed but timed out
2606 	 */
2607 
2608 	linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
2609 	if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
2610 	    (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1))
2611 		goto out;
2612 
2613 	pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
2614 	pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
2615 
2616 	ret_val =  ixgbe_negotiate_fc(hw, pcs_anadv_reg,
2617 				      pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
2618 				      IXGBE_PCS1GANA_ASM_PAUSE,
2619 				      IXGBE_PCS1GANA_SYM_PAUSE,
2620 				      IXGBE_PCS1GANA_ASM_PAUSE);
2621 
2622 out:
2623 	return ret_val;
2624 }
2625 
2626 /**
2627  *  ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
2628  *  @hw: pointer to hardware structure
2629  *
2630  *  Enable flow control according to IEEE clause 37.
2631  **/
2632 static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
2633 {
2634 	u32 links2, anlp1_reg, autoc_reg, links;
2635 	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2636 
2637 	/*
2638 	 * On backplane, bail out if
2639 	 * - backplane autoneg was not completed, or if
2640 	 * - we are 82599 and link partner is not AN enabled
2641 	 */
2642 	links = IXGBE_READ_REG(hw, IXGBE_LINKS);
2643 	if ((links & IXGBE_LINKS_KX_AN_COMP) == 0)
2644 		goto out;
2645 
2646 	if (hw->mac.type == ixgbe_mac_82599EB) {
2647 		links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
2648 		if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0)
2649 			goto out;
2650 	}
2651 	/*
2652 	 * Read the 10g AN autoc and LP ability registers and resolve
2653 	 * local flow control settings accordingly
2654 	 */
2655 	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2656 	anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
2657 
2658 	ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
2659 		anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
2660 		IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);
2661 
2662 out:
2663 	return ret_val;
2664 }
2665 
2666 /**
2667  *  ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
2668  *  @hw: pointer to hardware structure
2669  *
2670  *  Enable flow control according to IEEE clause 37.
2671  **/
2672 static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
2673 {
2674 	u16 technology_ability_reg = 0;
2675 	u16 lp_technology_ability_reg = 0;
2676 
2677 	hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
2678 			     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
2679 			     &technology_ability_reg);
2680 	hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP,
2681 			     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
2682 			     &lp_technology_ability_reg);
2683 
2684 	return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
2685 				  (u32)lp_technology_ability_reg,
2686 				  IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
2687 				  IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
2688 }
2689 
2690 /**
2691  *  ixgbe_fc_autoneg - Configure flow control
2692  *  @hw: pointer to hardware structure
2693  *
2694  *  Compares our advertised flow control capabilities to those advertised by
2695  *  our link partner, and determines the proper flow control mode to use.
2696  **/
2697 void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
2698 {
2699 	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2700 	ixgbe_link_speed speed;
2701 	bool link_up;
2702 
2703 	DEBUGFUNC("ixgbe_fc_autoneg");
2704 
2705 	/*
2706 	 * AN should have completed when the cable was plugged in.
2707 	 * Look for reasons to bail out.  Bail out if:
2708 	 * - FC autoneg is disabled, or if
2709 	 * - link is not up.
2710 	 */
2711 	if (hw->fc.disable_fc_autoneg)
2712 		goto out;
2713 
2714 	hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
2715 	if (!link_up)
2716 		goto out;
2717 
2718 	switch (hw->phy.media_type) {
2719 	/* Autoneg flow control on fiber adapters */
2720 	case ixgbe_media_type_fiber:
2721 		if (speed == IXGBE_LINK_SPEED_1GB_FULL)
2722 			ret_val = ixgbe_fc_autoneg_fiber(hw);
2723 		break;
2724 
2725 	/* Autoneg flow control on backplane adapters */
2726 	case ixgbe_media_type_backplane:
2727 		ret_val = ixgbe_fc_autoneg_backplane(hw);
2728 		break;
2729 
2730 	/* Autoneg flow control on copper adapters */
2731 	case ixgbe_media_type_copper:
2732 		if (ixgbe_device_supports_autoneg_fc(hw) == IXGBE_SUCCESS)
2733 			ret_val = ixgbe_fc_autoneg_copper(hw);
2734 		break;
2735 
2736 	default:
2737 		break;
2738 	}
2739 
2740 out:
2741 	if (ret_val == IXGBE_SUCCESS) {
2742 		hw->fc.fc_was_autonegged = TRUE;
2743 	} else {
2744 		hw->fc.fc_was_autonegged = FALSE;
2745 		hw->fc.current_mode = hw->fc.requested_mode;
2746 	}
2747 }
2748 
2749 /**
2750  *  ixgbe_disable_pcie_master - Disable PCI-express master access
2751  *  @hw: pointer to hardware structure
2752  *
2753  *  Disables PCI-Express master access and verifies there are no pending
2754  *  requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable
2755  *  bit hasn't caused the master requests to be disabled, else IXGBE_SUCCESS
2756  *  is returned signifying master requests disabled.
2757  **/
2758 s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
2759 {
2760 	s32 status = IXGBE_SUCCESS;
2761 	u32 i;
2762 
2763 	DEBUGFUNC("ixgbe_disable_pcie_master");
2764 
2765 	/* Always set this bit to ensure any future transactions are blocked */
2766 	IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);
2767 
2768 	/* Exit if master requets are blocked */
2769 	if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
2770 		goto out;
2771 
2772 	/* Poll for master request bit to clear */
2773 	for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
2774 		usec_delay(100);
2775 		if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
2776 			goto out;
2777 	}
2778 
2779 	/*
2780 	 * Two consecutive resets are required via CTRL.RST per datasheet
2781 	 * 5.2.5.3.2 Master Disable.  We set a flag to inform the reset routine
2782 	 * of this need.  The first reset prevents new master requests from
2783 	 * being issued by our device.  We then must wait 1usec or more for any
2784 	 * remaining completions from the PCIe bus to trickle in, and then reset
2785 	 * again to clear out any effects they may have had on our device.
2786 	 */
2787 	DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n");
2788 	hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
2789 
2790 	/*
2791 	 * Before proceeding, make sure that the PCIe block does not have
2792 	 * transactions pending.
2793 	 */
2794 	for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
2795 		usec_delay(100);
2796 		if (!(IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS) &
2797 		    IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
2798 			goto out;
2799 	}
2800 
2801 	DEBUGOUT("PCIe transaction pending bit also did not clear.\n");
2802 	status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
2803 
2804 out:
2805 	return status;
2806 }
2807 
2808 /**
2809  *  ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
2810  *  @hw: pointer to hardware structure
2811  *  @mask: Mask to specify which semaphore to acquire
2812  *
2813  *  Acquires the SWFW semaphore through the GSSR register for the specified
2814  *  function (CSR, PHY0, PHY1, EEPROM, Flash)
2815  **/
2816 s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
2817 {
2818 	u32 gssr;
2819 	u32 swmask = mask;
2820 	u32 fwmask = mask << 5;
2821 	s32 timeout = 200;
2822 
2823 	DEBUGFUNC("ixgbe_acquire_swfw_sync");
2824 
2825 	while (timeout) {
2826 		/*
2827 		 * SW EEPROM semaphore bit is used for access to all
2828 		 * SW_FW_SYNC/GSSR bits (not just EEPROM)
2829 		 */
2830 		if (ixgbe_get_eeprom_semaphore(hw))
2831 			return IXGBE_ERR_SWFW_SYNC;
2832 
2833 		gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
2834 		if (!(gssr & (fwmask | swmask)))
2835 			break;
2836 
2837 		/*
2838 		 * Firmware currently using resource (fwmask) or other software
2839 		 * thread currently using resource (swmask)
2840 		 */
2841 		ixgbe_release_eeprom_semaphore(hw);
2842 		msec_delay(5);
2843 		timeout--;
2844 	}
2845 
2846 	if (!timeout) {
2847 		DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
2848 		return IXGBE_ERR_SWFW_SYNC;
2849 	}
2850 
2851 	gssr |= swmask;
2852 	IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
2853 
2854 	ixgbe_release_eeprom_semaphore(hw);
2855 	return IXGBE_SUCCESS;
2856 }
2857 
2858 /**
2859  *  ixgbe_release_swfw_sync - Release SWFW semaphore
2860  *  @hw: pointer to hardware structure
2861  *  @mask: Mask to specify which semaphore to release
2862  *
2863  *  Releases the SWFW semaphore through the GSSR register for the specified
2864  *  function (CSR, PHY0, PHY1, EEPROM, Flash)
2865  **/
2866 void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
2867 {
2868 	u32 gssr;
2869 	u32 swmask = mask;
2870 
2871 	DEBUGFUNC("ixgbe_release_swfw_sync");
2872 
2873 	ixgbe_get_eeprom_semaphore(hw);
2874 
2875 	gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
2876 	gssr &= ~swmask;
2877 	IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
2878 
2879 	ixgbe_release_eeprom_semaphore(hw);
2880 }
2881 
2882 /**
2883  *  ixgbe_disable_sec_rx_path_generic - Stops the receive data path
2884  *  @hw: pointer to hardware structure
2885  *
2886  *  Stops the receive data path and waits for the HW to internally empty
2887  *  the Rx security block
2888  **/
2889 s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw)
2890 {
2891 #define IXGBE_MAX_SECRX_POLL 40
2892 
2893 	int i;
2894 	int secrxreg;
2895 
2896 	DEBUGFUNC("ixgbe_disable_sec_rx_path_generic");
2897 
2898 
2899 	secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
2900 	secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
2901 	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
2902 	for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
2903 		secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
2904 		if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
2905 			break;
2906 		else
2907 			/* Use interrupt-safe sleep just in case */
2908 			usec_delay(1000);
2909 	}
2910 
2911 	/* For informational purposes only */
2912 	if (i >= IXGBE_MAX_SECRX_POLL)
2913 		DEBUGOUT("Rx unit being enabled before security "
2914 			 "path fully disabled.  Continuing with init.\n");
2915 
2916 	return IXGBE_SUCCESS;
2917 }
2918 
2919 /**
2920  *  ixgbe_enable_sec_rx_path_generic - Enables the receive data path
2921  *  @hw: pointer to hardware structure
2922  *
2923  *  Enables the receive data path.
2924  **/
2925 s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw)
2926 {
2927 	int secrxreg;
2928 
2929 	DEBUGFUNC("ixgbe_enable_sec_rx_path_generic");
2930 
2931 	secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
2932 	secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
2933 	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
2934 	IXGBE_WRITE_FLUSH(hw);
2935 
2936 	return IXGBE_SUCCESS;
2937 }
2938 
2939 /**
2940  *  ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit
2941  *  @hw: pointer to hardware structure
2942  *  @regval: register value to write to RXCTRL
2943  *
2944  *  Enables the Rx DMA unit
2945  **/
2946 s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
2947 {
2948 	DEBUGFUNC("ixgbe_enable_rx_dma_generic");
2949 
2950 	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
2951 
2952 	return IXGBE_SUCCESS;
2953 }
2954 
2955 /**
2956  *  ixgbe_blink_led_start_generic - Blink LED based on index.
2957  *  @hw: pointer to hardware structure
2958  *  @index: led number to blink
2959  **/
s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
{
	ixgbe_link_speed speed = 0;
	bool link_up = 0;
	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);

	DEBUGFUNC("ixgbe_blink_led_start_generic");

	/*
	 * Link must be up to auto-blink the LEDs;
	 * Force it if link is down.
	 */
	hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);

	if (!link_up) {
		/* Force link up (FLU) and restart autoneg, then flush and
		 * give the MAC 10ms to settle before touching the LED. */
		autoc_reg |= IXGBE_AUTOC_AN_RESTART;
		autoc_reg |= IXGBE_AUTOC_FLU;
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
		IXGBE_WRITE_FLUSH(hw);
		msec_delay(10);
	}

	/* Clear this LED's current mode bits and select blink mode */
	led_reg &= ~IXGBE_LED_MODE_MASK(index);
	led_reg |= IXGBE_LED_BLINK(index);
	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
	IXGBE_WRITE_FLUSH(hw);

	return IXGBE_SUCCESS;
}
2990 
2991 /**
2992  *  ixgbe_blink_led_stop_generic - Stop blinking LED based on index.
2993  *  @hw: pointer to hardware structure
2994  *  @index: led number to stop blinking
2995  **/
s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
{
	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);

	DEBUGFUNC("ixgbe_blink_led_stop_generic");


	/* Drop forced link-up (FLU, set by blink_led_start) and restart
	 * autonegotiation so the link returns to its negotiated state. */
	autoc_reg &= ~IXGBE_AUTOC_FLU;
	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);

	/* Clear blink mode and restore the LED to link/activity mode */
	led_reg &= ~IXGBE_LED_MODE_MASK(index);
	led_reg &= ~IXGBE_LED_BLINK(index);
	led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
	IXGBE_WRITE_FLUSH(hw);

	return IXGBE_SUCCESS;
}
3016 
3017 /**
3018  *  ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM
3019  *  @hw: pointer to hardware structure
3020  *  @san_mac_offset: SAN MAC address offset
3021  *
3022  *  This function will read the EEPROM location for the SAN MAC address
3023  *  pointer, and returns the value at that location.  This is used in both
3024  *  get and set mac_addr routines.
3025  **/
3026 static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
3027 					 u16 *san_mac_offset)
3028 {
3029 	DEBUGFUNC("ixgbe_get_san_mac_addr_offset");
3030 
3031 	/*
3032 	 * First read the EEPROM pointer to see if the MAC addresses are
3033 	 * available.
3034 	 */
3035 	hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, san_mac_offset);
3036 
3037 	return IXGBE_SUCCESS;
3038 }
3039 
3040 /**
3041  *  ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM
3042  *  @hw: pointer to hardware structure
3043  *  @san_mac_addr: SAN MAC address
3044  *
3045  *  Reads the SAN MAC address from the EEPROM, if it's available.  This is
3046  *  per-port, so set_lan_id() must be called before reading the addresses.
3047  *  set_lan_id() is called by identify_sfp(), but this cannot be relied
3048  *  upon for non-SFP connections, so we must call it here.
3049  **/
3050 s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
3051 {
3052 	u16 san_mac_data, san_mac_offset;
3053 	u8 i;
3054 
3055 	DEBUGFUNC("ixgbe_get_san_mac_addr_generic");
3056 
3057 	/*
3058 	 * First read the EEPROM pointer to see if the MAC addresses are
3059 	 * available.  If they're not, no point in calling set_lan_id() here.
3060 	 */
3061 	ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
3062 
3063 	if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
3064 		/*
3065 		 * No addresses available in this EEPROM.  It's not an
3066 		 * error though, so just wipe the local address and return.
3067 		 */
3068 		for (i = 0; i < 6; i++)
3069 			san_mac_addr[i] = 0xFF;
3070 
3071 		goto san_mac_addr_out;
3072 	}
3073 
3074 	/* make sure we know which port we need to program */
3075 	hw->mac.ops.set_lan_id(hw);
3076 	/* apply the port offset to the address offset */
3077 	(hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
3078 			 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
3079 	for (i = 0; i < 3; i++) {
3080 		hw->eeprom.ops.read(hw, san_mac_offset, &san_mac_data);
3081 		san_mac_addr[i * 2] = (u8)(san_mac_data);
3082 		san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
3083 		san_mac_offset++;
3084 	}
3085 
3086 san_mac_addr_out:
3087 	return IXGBE_SUCCESS;
3088 }
3089 
3090 /**
3091  *  ixgbe_set_san_mac_addr_generic - Write the SAN MAC address to the EEPROM
3092  *  @hw: pointer to hardware structure
3093  *  @san_mac_addr: SAN MAC address
3094  *
3095  *  Write a SAN MAC address to the EEPROM.
3096  **/
3097 s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
3098 {
3099 	s32 status = IXGBE_SUCCESS;
3100 	u16 san_mac_data, san_mac_offset;
3101 	u8 i;
3102 
3103 	DEBUGFUNC("ixgbe_set_san_mac_addr_generic");
3104 
3105 	/* Look for SAN mac address pointer.  If not defined, return */
3106 	ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
3107 
3108 	if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
3109 		status = IXGBE_ERR_NO_SAN_ADDR_PTR;
3110 		goto san_mac_addr_out;
3111 	}
3112 
3113 	/* Make sure we know which port we need to write */
3114 	hw->mac.ops.set_lan_id(hw);
3115 	/* Apply the port offset to the address offset */
3116 	(hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
3117 			 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
3118 
3119 	for (i = 0; i < 3; i++) {
3120 		san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8);
3121 		san_mac_data |= (u16)(san_mac_addr[i * 2]);
3122 		hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data);
3123 		san_mac_offset++;
3124 	}
3125 
3126 san_mac_addr_out:
3127 	return status;
3128 }
3129 
3130 /**
3131  *  ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count
3132  *  @hw: pointer to hardware structure
3133  *
3134  *  Read PCIe configuration space, and get the MSI-X vector count from
3135  *  the capabilities table.
3136  **/
3137 u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
3138 {
3139 	u16 msix_count = 1;
3140 	u16 max_msix_count;
3141 	u16 pcie_offset;
3142 
3143 	switch (hw->mac.type) {
3144 	case ixgbe_mac_82598EB:
3145 		pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS;
3146 		max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598;
3147 		break;
3148 	case ixgbe_mac_82599EB:
3149 	case ixgbe_mac_X540:
3150 		pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
3151 		max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
3152 		break;
3153 	default:
3154 		return msix_count;
3155 	}
3156 
3157 	DEBUGFUNC("ixgbe_get_pcie_msix_count_generic");
3158 	msix_count = IXGBE_READ_PCIE_WORD(hw, pcie_offset);
3159 	msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
3160 
3161 	/* MSI-X count is zero-based in HW */
3162 	msix_count++;
3163 
3164 	if (msix_count > max_msix_count)
3165 		msix_count = max_msix_count;
3166 
3167 	return msix_count;
3168 }
3169 
3170 /**
3171  *  ixgbe_insert_mac_addr_generic - Find a RAR for this mac address
3172  *  @hw: pointer to hardware structure
3173  *  @addr: Address to put into receive address register
3174  *  @vmdq: VMDq pool to assign
3175  *
3176  *  Puts an ethernet address into a receive address register, or
3177  *  finds the rar that it is already in; adds to the pool list
3178  **/
s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
{
	/* Sentinel: no unused RAR slot observed during the scan */
	static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF;
	u32 first_empty_rar = NO_EMPTY_RAR_FOUND;
	u32 rar;
	u32 rar_low, rar_high;
	u32 addr_low, addr_high;

	DEBUGFUNC("ixgbe_insert_mac_addr_generic");

	/* swap bytes for HW little endian */
	addr_low  = addr[0] | (addr[1] << 8)
			    | (addr[2] << 16)
			    | (addr[3] << 24);
	addr_high = addr[4] | (addr[5] << 8);

	/*
	 * Either find the mac_id in rar or find the first empty space.
	 * rar_highwater points to just after the highest currently used
	 * rar in order to shorten the search.  It grows when we add a new
	 * rar to the top.
	 */
	for (rar = 0; rar < hw->mac.rar_highwater; rar++) {
		rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));

		/* An entry without the Address Valid bit is reusable */
		if (((IXGBE_RAH_AV & rar_high) == 0)
		    && first_empty_rar == NO_EMPTY_RAR_FOUND) {
			first_empty_rar = rar;
		} else if ((rar_high & 0xFFFF) == addr_high) {
			/* High halves match; confirm with the low word */
			rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar));
			if (rar_low == addr_low)
				break;    /* found it already in the rars */
		}
	}

	if (rar < hw->mac.rar_highwater) {
		/* already there so just add to the pool bits */
		ixgbe_set_vmdq(hw, rar, vmdq);
	} else if (first_empty_rar != NO_EMPTY_RAR_FOUND) {
		/* stick it into first empty RAR slot we found */
		rar = first_empty_rar;
		ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
	} else if (rar == hw->mac.rar_highwater) {
		/* add it to the top of the list and inc the highwater mark */
		ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
		hw->mac.rar_highwater++;
	} else if (rar >= hw->mac.num_rar_entries) {
		/* table completely full: cannot insert */
		return IXGBE_ERR_INVALID_MAC_ADDR;
	}

	/*
	 * If we found rar[0], make sure the default pool bit (we use pool 0)
	 * remains cleared to be sure default pool packets will get delivered
	 */
	if (rar == 0)
		ixgbe_clear_vmdq(hw, rar, 0);

	/* On success the return value is the RAR index that was used */
	return rar;
}
3238 
3239 /**
3240  *  ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
3241  *  @hw: pointer to hardware struct
3242  *  @rar: receive address register index to disassociate
3243  *  @vmdq: VMDq pool index to remove from the rar
3244  **/
s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
	u32 mpsar_lo, mpsar_hi;
	u32 rar_entries = hw->mac.num_rar_entries;

	DEBUGFUNC("ixgbe_clear_vmdq_generic");

	/* Make sure we are using a valid rar index range */
	if (rar >= rar_entries) {
		DEBUGOUT1("RAR index %d is out of range.\n", rar);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
	mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));

	/* Nothing to do if no pool is associated with this RAR */
	if (!mpsar_lo && !mpsar_hi)
		goto done;

	if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
		/* Drop every pool association in one shot */
		if (mpsar_lo) {
			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
			mpsar_lo = 0;
		}
		if (mpsar_hi) {
			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
			mpsar_hi = 0;
		}
	} else if (vmdq < 32) {
		/* Pools 0-31 are tracked in MPSAR_LO */
		mpsar_lo &= ~(1 << vmdq);
		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
	} else {
		/* Pools 32-63 are tracked in MPSAR_HI */
		mpsar_hi &= ~(1 << (vmdq - 32));
		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
	}

	/* was that the last pool using this rar? */
	if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
		hw->mac.ops.clear_rar(hw, rar);
done:
	return IXGBE_SUCCESS;
}
3287 
3288 /**
3289  *  ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address
3290  *  @hw: pointer to hardware struct
3291  *  @rar: receive address register index to associate with a VMDq index
3292  *  @vmdq: VMDq pool index
3293  **/
3294 s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
3295 {
3296 	u32 mpsar;
3297 	u32 rar_entries = hw->mac.num_rar_entries;
3298 
3299 	DEBUGFUNC("ixgbe_set_vmdq_generic");
3300 
3301 	/* Make sure we are using a valid rar index range */
3302 	if (rar >= rar_entries) {
3303 		DEBUGOUT1("RAR index %d is out of range.\n", rar);
3304 		return IXGBE_ERR_INVALID_ARGUMENT;
3305 	}
3306 
3307 	if (vmdq < 32) {
3308 		mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3309 		mpsar |= 1 << vmdq;
3310 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
3311 	} else {
3312 		mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3313 		mpsar |= 1 << (vmdq - 32);
3314 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
3315 	}
3316 	return IXGBE_SUCCESS;
3317 }
3318 
3319 /**
3320  *  This function should only be involved in the IOV mode.
3321  *  In IOV mode, Default pool is next pool after the number of
3322  *  VFs advertised and not 0.
3323  *  MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index]
3324  *
3325  *  ixgbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address
3326  *  @hw: pointer to hardware struct
3327  *  @vmdq: VMDq pool index
3328  **/
3329 s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
3330 {
3331 	u32 rar = hw->mac.san_mac_rar_index;
3332 
3333 	DEBUGFUNC("ixgbe_set_vmdq_san_mac");
3334 
3335 	if (vmdq < 32) {
3336 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq);
3337 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
3338 	} else {
3339 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
3340 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32));
3341 	}
3342 
3343 	return IXGBE_SUCCESS;
3344 }
3345 
3346 /**
3347  *  ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
3348  *  @hw: pointer to hardware structure
3349  **/
3350 s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
3351 {
3352 	int i;
3353 
3354 	DEBUGFUNC("ixgbe_init_uta_tables_generic");
3355 	DEBUGOUT(" Clearing UTA\n");
3356 
3357 	for (i = 0; i < 128; i++)
3358 		IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
3359 
3360 	return IXGBE_SUCCESS;
3361 }
3362 
3363 /**
3364  *  ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
3365  *  @hw: pointer to hardware structure
3366  *  @vlan: VLAN id to write to VLAN filter
3367  *
3368  *  return the VLVF index where this VLAN id should be placed
3369  *
3370  **/
3371 s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan)
3372 {
3373 	u32 bits = 0;
3374 	u32 first_empty_slot = 0;
3375 	s32 regindex;
3376 
3377 	/* short cut the special case */
3378 	if (vlan == 0)
3379 		return 0;
3380 
3381 	/*
3382 	  * Search for the vlan id in the VLVF entries. Save off the first empty
3383 	  * slot found along the way
3384 	  */
3385 	for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
3386 		bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
3387 		if (!bits && !(first_empty_slot))
3388 			first_empty_slot = regindex;
3389 		else if ((bits & 0x0FFF) == vlan)
3390 			break;
3391 	}
3392 
3393 	/*
3394 	  * If regindex is less than IXGBE_VLVF_ENTRIES, then we found the vlan
3395 	  * in the VLVF. Else use the first empty VLVF register for this
3396 	  * vlan id.
3397 	  */
3398 	if (regindex >= IXGBE_VLVF_ENTRIES) {
3399 		if (first_empty_slot)
3400 			regindex = first_empty_slot;
3401 		else {
3402 			DEBUGOUT("No space in VLVF.\n");
3403 			regindex = IXGBE_ERR_NO_SPACE;
3404 		}
3405 	}
3406 
3407 	return regindex;
3408 }
3409 
3410 /**
3411  *  ixgbe_set_vfta_generic - Set VLAN filter table
3412  *  @hw: pointer to hardware structure
3413  *  @vlan: VLAN id to write to VLAN filter
3414  *  @vind: VMDq output index that maps queue to VLAN id in VFVFB
3415  *  @vlan_on: boolean flag to turn on/off VLAN in VFVF
3416  *
3417  *  Turn on/off specified VLAN in the VLAN filter table.
3418  **/
s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
			   bool vlan_on)
{
	s32 regindex;
	u32 bitindex;
	u32 vfta;
	u32 targetbit;
	s32 ret_val = IXGBE_SUCCESS;
	bool vfta_changed = FALSE;

	DEBUGFUNC("ixgbe_set_vfta_generic");

	/* VLAN ids are only 12 bits wide */
	if (vlan > 4095)
		return IXGBE_ERR_PARAM;

	/*
	 * this is a 2 part operation - first the VFTA, then the
	 * VLVF and VLVFB if VT Mode is set
	 * We don't write the VFTA until we know the VLVF part succeeded.
	 */

	/* Part 1
	 * The VFTA is a bitstring made up of 128 32-bit registers
	 * that enable the particular VLAN id, much like the MTA:
	 *    bits[11-5]: which register
	 *    bits[4-0]:  which bit in the register
	 */
	regindex = (vlan >> 5) & 0x7F;
	bitindex = vlan & 0x1F;
	targetbit = (1 << bitindex);
	vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));

	/* Only schedule a VFTA write if the bit actually changes */
	if (vlan_on) {
		if (!(vfta & targetbit)) {
			vfta |= targetbit;
			vfta_changed = TRUE;
		}
	} else {
		if ((vfta & targetbit)) {
			vfta &= ~targetbit;
			vfta_changed = TRUE;
		}
	}

	/* Part 2
	 * Call ixgbe_set_vlvf_generic to set VLVFB and VLVF
	 */
	ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on,
					 &vfta_changed);
	if (ret_val != IXGBE_SUCCESS)
		return ret_val;

	/* vfta_changed may have been vetoed by the VLVF step above when
	 * other pools/VFs still use this VLAN id */
	if (vfta_changed)
		IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta);

	return IXGBE_SUCCESS;
}
3476 
3477 /**
3478  *  ixgbe_set_vlvf_generic - Set VLAN Pool Filter
3479  *  @hw: pointer to hardware structure
3480  *  @vlan: VLAN id to write to VLAN filter
3481  *  @vind: VMDq output index that maps queue to VLAN id in VFVFB
3482  *  @vlan_on: boolean flag to turn on/off VLAN in VFVF
3483  *  @vfta_changed: pointer to boolean flag which indicates whether VFTA
3484  *                 should be changed
3485  *
3486  *  Turn on/off specified bit in VLVF table.
3487  **/
s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
			    bool vlan_on, bool *vfta_changed)
{
	u32 vt;

	DEBUGFUNC("ixgbe_set_vlvf_generic");

	/* VLAN ids are only 12 bits wide */
	if (vlan > 4095)
		return IXGBE_ERR_PARAM;

	/* If VT Mode is set
	 *   Either vlan_on
	 *     make sure the vlan is in VLVF
	 *     set the vind bit in the matching VLVFB
	 *   Or !vlan_on
	 *     clear the pool bit and possibly the vind
	 */
	vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
	if (vt & IXGBE_VT_CTL_VT_ENABLE) {
		s32 vlvf_index;
		u32 bits;

		/* Locate (or allocate) the VLVF slot for this vlan id */
		vlvf_index = ixgbe_find_vlvf_slot(hw, vlan);
		if (vlvf_index < 0)
			return vlvf_index;

		if (vlan_on) {
			/* set the pool bit */
			if (vind < 32) {
				/* pools 0-31 live in the even VLVFB word */
				bits = IXGBE_READ_REG(hw,
						IXGBE_VLVFB(vlvf_index * 2));
				bits |= (1 << vind);
				IXGBE_WRITE_REG(hw,
						IXGBE_VLVFB(vlvf_index * 2),
						bits);
			} else {
				/* pools 32-63 live in the odd VLVFB word */
				bits = IXGBE_READ_REG(hw,
					IXGBE_VLVFB((vlvf_index * 2) + 1));
				bits |= (1 << (vind - 32));
				IXGBE_WRITE_REG(hw,
					IXGBE_VLVFB((vlvf_index * 2) + 1),
					bits);
			}
		} else {
			/* clear the pool bit */
			if (vind < 32) {
				bits = IXGBE_READ_REG(hw,
						IXGBE_VLVFB(vlvf_index * 2));
				bits &= ~(1 << vind);
				IXGBE_WRITE_REG(hw,
						IXGBE_VLVFB(vlvf_index * 2),
						bits);
				/* fold in the other half so "bits" reflects
				 * every pool still using this VLAN id */
				bits |= IXGBE_READ_REG(hw,
					IXGBE_VLVFB((vlvf_index * 2) + 1));
			} else {
				bits = IXGBE_READ_REG(hw,
					IXGBE_VLVFB((vlvf_index * 2) + 1));
				bits &= ~(1 << (vind - 32));
				IXGBE_WRITE_REG(hw,
					IXGBE_VLVFB((vlvf_index * 2) + 1),
					bits);
				/* fold in the other half so "bits" reflects
				 * every pool still using this VLAN id */
				bits |= IXGBE_READ_REG(hw,
						IXGBE_VLVFB(vlvf_index * 2));
			}
		}

		/*
		 * If there are still bits set in the VLVFB registers
		 * for the VLAN ID indicated we need to see if the
		 * caller is requesting that we clear the VFTA entry bit.
		 * If the caller has requested that we clear the VFTA
		 * entry bit but there are still pools/VFs using this VLAN
		 * ID entry then ignore the request.  We're not worried
		 * about the case where we're turning the VFTA VLAN ID
		 * entry bit on, only when requested to turn it off as
		 * there may be multiple pools and/or VFs using the
		 * VLAN ID entry.  In that case we cannot clear the
		 * VFTA bit until all pools/VFs using that VLAN ID have also
		 * been cleared.  This will be indicated by "bits" being
		 * zero.
		 */
		if (bits) {
			IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
					(IXGBE_VLVF_VIEN | vlan));
			if ((!vlan_on) && (vfta_changed != NULL)) {
				/* someone wants to clear the vfta entry
				 * but some pools/VFs are still using it.
				 * Ignore it. */
				*vfta_changed = FALSE;
			}
		} else
			IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
	}

	return IXGBE_SUCCESS;
}
3584 
3585 /**
3586  *  ixgbe_clear_vfta_generic - Clear VLAN filter table
3587  *  @hw: pointer to hardware structure
3588  *
3589  *  Clears the VLAN filter table, and the VMDq index associated with the filter
3590  **/
3591 s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
3592 {
3593 	u32 offset;
3594 
3595 	DEBUGFUNC("ixgbe_clear_vfta_generic");
3596 
3597 	for (offset = 0; offset < hw->mac.vft_size; offset++)
3598 		IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
3599 
3600 	for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
3601 		IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
3602 		IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0);
3603 		IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset * 2) + 1), 0);
3604 	}
3605 
3606 	return IXGBE_SUCCESS;
3607 }
3608 
3609 /**
3610  *  ixgbe_check_mac_link_generic - Determine link and speed status
3611  *  @hw: pointer to hardware structure
3612  *  @speed: pointer to link speed
3613  *  @link_up: TRUE when link is up
3614  *  @link_up_wait_to_complete: bool used to wait for link up or not
3615  *
3616  *  Reads the links register to determine if link is up and the current speed
3617  **/
3618 s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
3619 				 bool *link_up, bool link_up_wait_to_complete)
3620 {
3621 	u32 links_reg, links_orig;
3622 	u32 i;
3623 
3624 	DEBUGFUNC("ixgbe_check_mac_link_generic");
3625 
3626 	/* clear the old state */
3627 	links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);
3628 
3629 	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
3630 
3631 	if (links_orig != links_reg) {
3632 		DEBUGOUT2("LINKS changed from %08X to %08X\n",
3633 			  links_orig, links_reg);
3634 	}
3635 
3636 	if (link_up_wait_to_complete) {
3637 		for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
3638 			if (links_reg & IXGBE_LINKS_UP) {
3639 				*link_up = TRUE;
3640 				break;
3641 			} else {
3642 				*link_up = FALSE;
3643 			}
3644 			msec_delay(100);
3645 			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
3646 		}
3647 	} else {
3648 		if (links_reg & IXGBE_LINKS_UP)
3649 			*link_up = TRUE;
3650 		else
3651 			*link_up = FALSE;
3652 	}
3653 
3654 	if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
3655 	    IXGBE_LINKS_SPEED_10G_82599)
3656 		*speed = IXGBE_LINK_SPEED_10GB_FULL;
3657 	else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
3658 		 IXGBE_LINKS_SPEED_1G_82599)
3659 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
3660 	else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
3661 		 IXGBE_LINKS_SPEED_100_82599)
3662 		*speed = IXGBE_LINK_SPEED_100_FULL;
3663 	else
3664 		*speed = IXGBE_LINK_SPEED_UNKNOWN;
3665 
3666 	return IXGBE_SUCCESS;
3667 }
3668 
3669 /**
3670  *  ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
3671  *  the EEPROM
3672  *  @hw: pointer to hardware structure
3673  *  @wwnn_prefix: the alternative WWNN prefix
3674  *  @wwpn_prefix: the alternative WWPN prefix
3675  *
3676  *  This function will read the EEPROM from the alternative SAN MAC address
3677  *  block to check the support for the alternative WWNN/WWPN prefix support.
3678  **/
3679 s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
3680 				 u16 *wwpn_prefix)
3681 {
3682 	u16 offset, caps;
3683 	u16 alt_san_mac_blk_offset;
3684 
3685 	DEBUGFUNC("ixgbe_get_wwn_prefix_generic");
3686 
3687 	/* clear output first */
3688 	*wwnn_prefix = 0xFFFF;
3689 	*wwpn_prefix = 0xFFFF;
3690 
3691 	/* check if alternative SAN MAC is supported */
3692 	hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR,
3693 			    &alt_san_mac_blk_offset);
3694 
3695 	if ((alt_san_mac_blk_offset == 0) ||
3696 	    (alt_san_mac_blk_offset == 0xFFFF))
3697 		goto wwn_prefix_out;
3698 
3699 	/* check capability in alternative san mac address block */
3700 	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
3701 	hw->eeprom.ops.read(hw, offset, &caps);
3702 	if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
3703 		goto wwn_prefix_out;
3704 
3705 	/* get the corresponding prefix for WWNN/WWPN */
3706 	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
3707 	hw->eeprom.ops.read(hw, offset, wwnn_prefix);
3708 
3709 	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
3710 	hw->eeprom.ops.read(hw, offset, wwpn_prefix);
3711 
3712 wwn_prefix_out:
3713 	return IXGBE_SUCCESS;
3714 }
3715 
3716 /**
3717  *  ixgbe_get_fcoe_boot_status_generic - Get FCOE boot status from EEPROM
3718  *  @hw: pointer to hardware structure
3719  *  @bs: the fcoe boot status
3720  *
3721  *  This function will read the FCOE boot status from the iSCSI FCOE block
3722  **/
3723 s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs)
3724 {
3725 	u16 offset, caps, flags;
3726 	s32 status;
3727 
3728 	DEBUGFUNC("ixgbe_get_fcoe_boot_status_generic");
3729 
3730 	/* clear output first */
3731 	*bs = ixgbe_fcoe_bootstatus_unavailable;
3732 
3733 	/* check if FCOE IBA block is present */
3734 	offset = IXGBE_FCOE_IBA_CAPS_BLK_PTR;
3735 	status = hw->eeprom.ops.read(hw, offset, &caps);
3736 	if (status != IXGBE_SUCCESS)
3737 		goto out;
3738 
3739 	if (!(caps & IXGBE_FCOE_IBA_CAPS_FCOE))
3740 		goto out;
3741 
3742 	/* check if iSCSI FCOE block is populated */
3743 	status = hw->eeprom.ops.read(hw, IXGBE_ISCSI_FCOE_BLK_PTR, &offset);
3744 	if (status != IXGBE_SUCCESS)
3745 		goto out;
3746 
3747 	if ((offset == 0) || (offset == 0xFFFF))
3748 		goto out;
3749 
3750 	/* read fcoe flags in iSCSI FCOE block */
3751 	offset = offset + IXGBE_ISCSI_FCOE_FLAGS_OFFSET;
3752 	status = hw->eeprom.ops.read(hw, offset, &flags);
3753 	if (status != IXGBE_SUCCESS)
3754 		goto out;
3755 
3756 	if (flags & IXGBE_ISCSI_FCOE_FLAGS_ENABLE)
3757 		*bs = ixgbe_fcoe_bootstatus_enabled;
3758 	else
3759 		*bs = ixgbe_fcoe_bootstatus_disabled;
3760 
3761 out:
3762 	return status;
3763 }
3764 
3765 /**
3766  *  ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
3767  *  @hw: pointer to hardware structure
3768  *  @enable: enable or disable switch for anti-spoofing
3769  *  @pf: Physical Function pool - do not enable anti-spoofing for the PF
3770  *
3771  **/
void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf)
{
	int j;
	int pf_target_reg = pf >> 3;	/* PFVFSPOOF word covering pool "pf" */
	int pf_target_shift = pf % 8;	/* bit position of "pf" in that word */
	u32 pfvfspoof = 0;

	/* 82598 has no anti-spoofing support */
	if (hw->mac.type == ixgbe_mac_82598EB)
		return;

	if (enable)
		pfvfspoof = IXGBE_SPOOF_MACAS_MASK;

	/*
	 * PFVFSPOOF register array is size 8 with 8 bits assigned to
	 * MAC anti-spoof enables in each register array element.
	 */
	for (j = 0; j < pf_target_reg; j++)
		IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);

	/*
	 * The PF should be allowed to spoof so that it can support
	 * emulation mode NICs.  Do not set the bits assigned to the PF
	 */
	pfvfspoof &= (1 << pf_target_shift) - 1;
	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);

	/*
	 * Remaining pools belong to the PF so they do not need to have
	 * anti-spoofing enabled.
	 */
	for (j++; j < IXGBE_PFVFSPOOF_REG_COUNT; j++)
		IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), 0);
}
3806 
3807 /**
3808  *  ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
3809  *  @hw: pointer to hardware structure
3810  *  @enable: enable or disable switch for VLAN anti-spoofing
3811  *  @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
3812  *
3813  **/
3814 void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
3815 {
3816 	int vf_target_reg = vf >> 3;
3817 	int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT;
3818 	u32 pfvfspoof;
3819 
3820 	if (hw->mac.type == ixgbe_mac_82598EB)
3821 		return;
3822 
3823 	pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
3824 	if (enable)
3825 		pfvfspoof |= (1 << vf_target_shift);
3826 	else
3827 		pfvfspoof &= ~(1 << vf_target_shift);
3828 	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
3829 }
3830 
3831 /**
3832  *  ixgbe_get_device_caps_generic - Get additional device capabilities
3833  *  @hw: pointer to hardware structure
3834  *  @device_caps: the EEPROM word with the extra device capabilities
3835  *
3836  *  This function will read the EEPROM location for the device capabilities,
3837  *  and return the word through device_caps.
3838  **/
s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps)
{
	DEBUGFUNC("ixgbe_get_device_caps_generic");

	/* Device capabilities are one EEPROM word at a fixed offset.
	 * NOTE(review): the read status is not checked, so *device_caps
	 * may be stale on EEPROM failure -- presumably callers tolerate
	 * this; confirm before relying on the value. */
	hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);

	return IXGBE_SUCCESS;
}
3847 
3848 /**
3849  *  ixgbe_enable_relaxed_ordering_gen2 - Enable relaxed ordering
3850  *  @hw: pointer to hardware structure
3851  *
3852  **/
3853 void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw)
3854 {
3855 	u32 regval;
3856 	u32 i;
3857 
3858 	DEBUGFUNC("ixgbe_enable_relaxed_ordering_gen2");
3859 
3860 	/* Enable relaxed ordering */
3861 	for (i = 0; i < hw->mac.max_tx_queues; i++) {
3862 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
3863 		regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
3864 		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
3865 	}
3866 
3867 	for (i = 0; i < hw->mac.max_rx_queues; i++) {
3868 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
3869 		regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
3870 			  IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
3871 		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
3872 	}
3873 
3874 }
3875 
3876 /**
3877  *  ixgbe_calculate_checksum - Calculate checksum for buffer
3878  *  @buffer: pointer to EEPROM
3879  *  @length: size of EEPROM to calculate a checksum for
3880  *  Calculates the checksum for some buffer on a specified length.  The
3881  *  checksum calculated is returned.
3882  **/
3883 static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
3884 {
3885 	u32 i;
3886 	u8 sum = 0;
3887 
3888 	DEBUGFUNC("ixgbe_calculate_checksum");
3889 
3890 	if (!buffer)
3891 		return 0;
3892 
3893 	for (i = 0; i < length; i++)
3894 		sum += buffer[i];
3895 
3896 	return (u8) (0 - sum);
3897 }
3898 
3899 /**
3900  *  ixgbe_host_interface_command - Issue command to manageability block
3901  *  @hw: pointer to the HW structure
3902  *  @buffer: contains the command to write and where the return status will
3903  *   be placed
3904  *  @length: length of buffer, must be multiple of 4 bytes
3905  *
3906  *  Communicates with the manageability block.  On success return IXGBE_SUCCESS
3907  *  else return IXGBE_ERR_HOST_INTERFACE_COMMAND.
3908  **/
3909 static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
3910 					u32 length)
3911 {
3912 	u32 hicr, i, bi;
3913 	u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
3914 	u8 buf_len, dword_len;
3915 
3916 	s32 ret_val = IXGBE_SUCCESS;
3917 
3918 	DEBUGFUNC("ixgbe_host_interface_command");
3919 
3920 	if (length == 0 || length & 0x3 ||
3921 	    length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
3922 		DEBUGOUT("Buffer length failure.\n");
3923 		ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
3924 		goto out;
3925 	}
3926 
3927 	/* Check that the host interface is enabled. */
3928 	hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
3929 	if ((hicr & IXGBE_HICR_EN) == 0) {
3930 		DEBUGOUT("IXGBE_HOST_EN bit disabled.\n");
3931 		ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
3932 		goto out;
3933 	}
3934 
3935 	/* Calculate length in DWORDs */
3936 	dword_len = length >> 2;
3937 
3938 	/*
3939 	 * The device driver writes the relevant command block
3940 	 * into the ram area.
3941 	 */
3942 	for (i = 0; i < dword_len; i++)
3943 		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
3944 				      i, IXGBE_CPU_TO_LE32(buffer[i]));
3945 
3946 	/* Setting this bit tells the ARC that a new command is pending. */
3947 	IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);
3948 
3949 	for (i = 0; i < IXGBE_HI_COMMAND_TIMEOUT; i++) {
3950 		hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
3951 		if (!(hicr & IXGBE_HICR_C))
3952 			break;
3953 		msec_delay(1);
3954 	}
3955 
3956 	/* Check command successful completion. */
3957 	if (i == IXGBE_HI_COMMAND_TIMEOUT ||
3958 	    (!(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV))) {
3959 		DEBUGOUT("Command has failed with no status valid.\n");
3960 		ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
3961 		goto out;
3962 	}
3963 
3964 	/* Calculate length in DWORDs */
3965 	dword_len = hdr_size >> 2;
3966 
3967 	/* first pull in the header so we know the buffer length */
3968 	for (bi = 0; bi < dword_len; bi++) {
3969 		buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
3970 		IXGBE_LE32_TO_CPUS(&buffer[bi]);
3971 	}
3972 
3973 	/* If there is any thing in data position pull it in */
3974 	buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len;
3975 	if (buf_len == 0)
3976 		goto out;
3977 
3978 	if (length < (buf_len + hdr_size)) {
3979 		DEBUGOUT("Buffer not large enough for reply message.\n");
3980 		ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
3981 		goto out;
3982 	}
3983 
3984 	/* Calculate length in DWORDs, add 3 for odd lengths */
3985 	dword_len = (buf_len + 3) >> 2;
3986 
3987 	/* Pull in the rest of the buffer (bi is where we left off)*/
3988 	for (; bi <= dword_len; bi++) {
3989 		buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
3990 		IXGBE_LE32_TO_CPUS(&buffer[bi]);
3991 	}
3992 
3993 out:
3994 	return ret_val;
3995 }
3996 
3997 /**
3998  *  ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware
3999  *  @hw: pointer to the HW structure
4000  *  @maj: driver version major number
4001  *  @min: driver version minor number
4002  *  @build: driver version build number
4003  *  @sub: driver version sub build number
4004  *
4005  *  Sends driver version number to firmware through the manageability
4006  *  block.  On success return IXGBE_SUCCESS
4007  *  else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring
4008  *  semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
4009  **/
4010 s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
4011 				 u8 build, u8 sub)
4012 {
4013 	struct ixgbe_hic_drv_info fw_cmd;
4014 	int i;
4015 	s32 ret_val = IXGBE_SUCCESS;
4016 
4017 	DEBUGFUNC("ixgbe_set_fw_drv_ver_generic");
4018 
4019 	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM)
4020 	    != IXGBE_SUCCESS) {
4021 		ret_val = IXGBE_ERR_SWFW_SYNC;
4022 		goto out;
4023 	}
4024 
4025 	fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
4026 	fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
4027 	fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
4028 	fw_cmd.port_num = (u8)hw->bus.func;
4029 	fw_cmd.ver_maj = maj;
4030 	fw_cmd.ver_min = min;
4031 	fw_cmd.ver_build = build;
4032 	fw_cmd.ver_sub = sub;
4033 	fw_cmd.hdr.checksum = 0;
4034 	fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
4035 				(FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
4036 	fw_cmd.pad = 0;
4037 	fw_cmd.pad2 = 0;
4038 
4039 	for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
4040 		ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
4041 						       sizeof(fw_cmd));
4042 		if (ret_val != IXGBE_SUCCESS)
4043 			continue;
4044 
4045 		if (fw_cmd.hdr.cmd_or_resp.ret_status ==
4046 		    FW_CEM_RESP_STATUS_SUCCESS)
4047 			ret_val = IXGBE_SUCCESS;
4048 		else
4049 			ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4050 
4051 		break;
4052 	}
4053 
4054 	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
4055 out:
4056 	return ret_val;
4057 }
4058 
4059 /**
4060  * ixgbe_set_rxpba_generic - Initialize Rx packet buffer
4061  * @hw: pointer to hardware structure
4062  * @num_pb: number of packet buffers to allocate
4063  * @headroom: reserve n KB of headroom
4064  * @strategy: packet buffer allocation strategy
4065  **/
4066 void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
4067 			     int strategy)
4068 {
4069 	u32 pbsize = hw->mac.rx_pb_size;
4070 	int i = 0;
4071 	u32 rxpktsize, txpktsize, txpbthresh;
4072 
4073 	/* Reserve headroom */
4074 	pbsize -= headroom;
4075 
4076 	if (!num_pb)
4077 		num_pb = 1;
4078 
4079 	/* Divide remaining packet buffer space amongst the number of packet
4080 	 * buffers requested using supplied strategy.
4081 	 */
4082 	switch (strategy) {
4083 	case PBA_STRATEGY_WEIGHTED:
4084 		/* ixgbe_dcb_pba_80_48 strategy weight first half of packet
4085 		 * buffer with 5/8 of the packet buffer space.
4086 		 */
4087 		rxpktsize = (pbsize * 5) / (num_pb * 4);
4088 		pbsize -= rxpktsize * (num_pb / 2);
4089 		rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
4090 		for (; i < (num_pb / 2); i++)
4091 			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
4092 		/* Fall through to configure remaining packet buffers */
4093 	case PBA_STRATEGY_EQUAL:
4094 		rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
4095 		for (; i < num_pb; i++)
4096 			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
4097 		break;
4098 	default:
4099 		break;
4100 	}
4101 
4102 	/* Only support an equally distributed Tx packet buffer strategy. */
4103 	txpktsize = IXGBE_TXPBSIZE_MAX / num_pb;
4104 	txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
4105 	for (i = 0; i < num_pb; i++) {
4106 		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
4107 		IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
4108 	}
4109 
4110 	/* Clear unused TCs, if any, to zero buffer size*/
4111 	for (; i < IXGBE_MAX_PB; i++) {
4112 		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
4113 		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
4114 		IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
4115 	}
4116 }
4117 
4118 /**
4119  * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo
4120  * @hw: pointer to the hardware structure
4121  *
4122  * The 82599 and x540 MACs can experience issues if TX work is still pending
4123  * when a reset occurs.  This function prevents this by flushing the PCIe
4124  * buffers on the system.
4125  **/
4126 void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
4127 {
4128 	u32 gcr_ext, hlreg0;
4129 
4130 	/*
4131 	 * If double reset is not requested then all transactions should
4132 	 * already be clear and as such there is no work to do
4133 	 */
4134 	if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED))
4135 		return;
4136 
4137 	/*
4138 	 * Set loopback enable to prevent any transmits from being sent
4139 	 * should the link come up.  This assumes that the RXCTRL.RXEN bit
4140 	 * has already been cleared.
4141 	 */
4142 	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4143 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);
4144 
4145 	/* initiate cleaning flow for buffers in the PCIe transaction layer */
4146 	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
4147 	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
4148 			gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR);
4149 
4150 	/* Flush all writes and allow 20usec for all transactions to clear */
4151 	IXGBE_WRITE_FLUSH(hw);
4152 	usec_delay(20);
4153 
4154 	/* restore previous register values */
4155 	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
4156 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
4157 }
4158 
4159