xref: /freebsd/sys/dev/ixgbe/ixgbe_common.c (revision 2f02600abfddfc4e9f20dd384a2e729b451e16bd)
1 /******************************************************************************
2 
3   Copyright (c) 2001-2013, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 #include "ixgbe_common.h"
36 #include "ixgbe_phy.h"
37 #include "ixgbe_dcb.h"
38 #include "ixgbe_dcb_82599.h"
39 #include "ixgbe_api.h"
40 
41 static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
42 static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
43 static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
44 static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
45 static void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
46 static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
47 					u16 count);
48 static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
49 static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
50 static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
51 static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
52 
53 static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
54 static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
55 					 u16 *san_mac_offset);
56 static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
57 					     u16 words, u16 *data);
58 static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
59 					      u16 words, u16 *data);
60 static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
61 						 u16 offset);
62 
63 /**
64  *  ixgbe_init_ops_generic - Inits function ptrs
65  *  @hw: pointer to the hardware structure
66  *
67  *  Initialize the function pointers.
68  **/
69 s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
70 {
71 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
72 	struct ixgbe_mac_info *mac = &hw->mac;
73 	u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
74 
75 	DEBUGFUNC("ixgbe_init_ops_generic");
76 
77 	/* EEPROM */
78 	eeprom->ops.init_params = &ixgbe_init_eeprom_params_generic;
79 	/* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */
80 	if (eec & IXGBE_EEC_PRES) {
81 		eeprom->ops.read = &ixgbe_read_eerd_generic;
82 		eeprom->ops.read_buffer = &ixgbe_read_eerd_buffer_generic;
83 	} else {
84 		eeprom->ops.read = &ixgbe_read_eeprom_bit_bang_generic;
85 		eeprom->ops.read_buffer =
86 				 &ixgbe_read_eeprom_buffer_bit_bang_generic;
87 	}
88 	eeprom->ops.write = &ixgbe_write_eeprom_generic;
89 	eeprom->ops.write_buffer = &ixgbe_write_eeprom_buffer_bit_bang_generic;
90 	eeprom->ops.validate_checksum =
91 				      &ixgbe_validate_eeprom_checksum_generic;
92 	eeprom->ops.update_checksum = &ixgbe_update_eeprom_checksum_generic;
93 	eeprom->ops.calc_checksum = &ixgbe_calc_eeprom_checksum_generic;
94 
95 	/* MAC */
96 	mac->ops.init_hw = &ixgbe_init_hw_generic;
97 	mac->ops.reset_hw = NULL;
98 	mac->ops.start_hw = &ixgbe_start_hw_generic;
99 	mac->ops.clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic;
100 	mac->ops.get_media_type = NULL;
101 	mac->ops.get_supported_physical_layer = NULL;
102 	mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_generic;
103 	mac->ops.get_mac_addr = &ixgbe_get_mac_addr_generic;
104 	mac->ops.stop_adapter = &ixgbe_stop_adapter_generic;
105 	mac->ops.get_bus_info = &ixgbe_get_bus_info_generic;
106 	mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie;
107 	mac->ops.acquire_swfw_sync = &ixgbe_acquire_swfw_sync;
108 	mac->ops.release_swfw_sync = &ixgbe_release_swfw_sync;
109 
110 	/* LEDs */
111 	mac->ops.led_on = &ixgbe_led_on_generic;
112 	mac->ops.led_off = &ixgbe_led_off_generic;
113 	mac->ops.blink_led_start = &ixgbe_blink_led_start_generic;
114 	mac->ops.blink_led_stop = &ixgbe_blink_led_stop_generic;
115 
116 	/* RAR, Multicast, VLAN */
117 	mac->ops.set_rar = &ixgbe_set_rar_generic;
118 	mac->ops.clear_rar = &ixgbe_clear_rar_generic;
119 	mac->ops.insert_mac_addr = NULL;
120 	mac->ops.set_vmdq = NULL;
121 	mac->ops.clear_vmdq = NULL;
122 	mac->ops.init_rx_addrs = &ixgbe_init_rx_addrs_generic;
123 	mac->ops.update_uc_addr_list = &ixgbe_update_uc_addr_list_generic;
124 	mac->ops.update_mc_addr_list = &ixgbe_update_mc_addr_list_generic;
125 	mac->ops.enable_mc = &ixgbe_enable_mc_generic;
126 	mac->ops.disable_mc = &ixgbe_disable_mc_generic;
127 	mac->ops.clear_vfta = NULL;
128 	mac->ops.set_vfta = NULL;
129 	mac->ops.set_vlvf = NULL;
130 	mac->ops.init_uta_tables = NULL;
131 
132 	/* Flow Control */
133 	mac->ops.fc_enable = &ixgbe_fc_enable_generic;
134 
135 	/* Link */
136 	mac->ops.get_link_capabilities = NULL;
137 	mac->ops.setup_link = NULL;
138 	mac->ops.check_link = NULL;
139 	mac->ops.dmac_config = NULL;
140 	mac->ops.dmac_update_tcs = NULL;
141 	mac->ops.dmac_config_tcs = NULL;
142 
143 	return IXGBE_SUCCESS;
144 }
145 
146 /**
147  * ixgbe_device_supports_autoneg_fc - Check if device supports autonegotiation
148  * of flow control
149  * @hw: pointer to hardware structure
150  *
151  * This function returns TRUE if the device supports flow control
152  * autonegotiation, and FALSE if it does not.
153  *
154  **/
155 bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
156 {
157 	bool supported = FALSE;
158 	ixgbe_link_speed speed;
159 	bool link_up;
160 
161 	DEBUGFUNC("ixgbe_device_supports_autoneg_fc");
162 
163 	switch (hw->phy.media_type) {
164 	case ixgbe_media_type_fiber_fixed:
165 	case ixgbe_media_type_fiber:
166 		hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
167 		/* if link is down, assume supported */
168 		if (link_up)
169 			supported = speed == IXGBE_LINK_SPEED_1GB_FULL ?
170 				TRUE : FALSE;
171 		else
172 			supported = TRUE;
173 		break;
174 	case ixgbe_media_type_backplane:
175 		supported = TRUE;
176 		break;
177 	case ixgbe_media_type_copper:
178 		/* only some copper devices support flow control autoneg */
179 		switch (hw->device_id) {
180 		case IXGBE_DEV_ID_82599_T3_LOM:
181 		case IXGBE_DEV_ID_X540T:
182 		case IXGBE_DEV_ID_X540_BYPASS:
183 			supported = TRUE;
184 			break;
185 		default:
186 			supported = FALSE;
187 		}
188 	default:
189 		break;
190 	}
191 	if (!supported)
192 		ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
193 			      "Device %x does not support flow control autoneg",
194 			      hw->device_id);
195 	return supported;
196 }
197 
198 /**
199  *  ixgbe_setup_fc - Set up flow control
200  *  @hw: pointer to hardware structure
201  *
202  *  Called at init time to set up flow control.
203  **/
204 static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
205 {
206 	s32 ret_val = IXGBE_SUCCESS;
207 	u32 reg = 0, reg_bp = 0;
208 	u16 reg_cu = 0;
209 	bool got_lock = FALSE;
210 
211 	DEBUGFUNC("ixgbe_setup_fc");
212 
213 	/*
214 	 * Validate the requested mode.  Strict IEEE mode does not allow
215 	 * ixgbe_fc_rx_pause because it will cause us to fail at UNH.
216 	 */
217 	if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
218 		ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
219 			   "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
220 		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
221 		goto out;
222 	}
223 
224 	/*
225 	 * 10gig parts do not have a word in the EEPROM to determine the
226 	 * default flow control setting, so we explicitly set it to full.
227 	 */
228 	if (hw->fc.requested_mode == ixgbe_fc_default)
229 		hw->fc.requested_mode = ixgbe_fc_full;
230 
231 	/*
232 	 * Set up the 1G and 10G flow control advertisement registers so the
233 	 * HW will be able to do fc autoneg once the cable is plugged in.  If
234 	 * we link at 10G, the 1G advertisement is harmless and vice versa.
235 	 */
236 	switch (hw->phy.media_type) {
237 	case ixgbe_media_type_fiber_fixed:
238 	case ixgbe_media_type_fiber:
239 	case ixgbe_media_type_backplane:
240 		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
241 		reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC);
242 		break;
243 	case ixgbe_media_type_copper:
244 		hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
245 				     IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg_cu);
246 		break;
247 	default:
248 		break;
249 	}
250 
251 	/*
252 	 * The possible values of fc.requested_mode are:
253 	 * 0: Flow control is completely disabled
254 	 * 1: Rx flow control is enabled (we can receive pause frames,
255 	 *    but not send pause frames).
256 	 * 2: Tx flow control is enabled (we can send pause frames but
257 	 *    we do not support receiving pause frames).
258 	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
259 	 * other: Invalid.
260 	 */
261 	switch (hw->fc.requested_mode) {
262 	case ixgbe_fc_none:
263 		/* Flow control completely disabled by software override. */
264 		reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
265 		if (hw->phy.media_type == ixgbe_media_type_backplane)
266 			reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
267 				    IXGBE_AUTOC_ASM_PAUSE);
268 		else if (hw->phy.media_type == ixgbe_media_type_copper)
269 			reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
270 		break;
271 	case ixgbe_fc_tx_pause:
272 		/*
273 		 * Tx Flow control is enabled, and Rx Flow control is
274 		 * disabled by software override.
275 		 */
276 		reg |= IXGBE_PCS1GANA_ASM_PAUSE;
277 		reg &= ~IXGBE_PCS1GANA_SYM_PAUSE;
278 		if (hw->phy.media_type == ixgbe_media_type_backplane) {
279 			reg_bp |= IXGBE_AUTOC_ASM_PAUSE;
280 			reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE;
281 		} else if (hw->phy.media_type == ixgbe_media_type_copper) {
282 			reg_cu |= IXGBE_TAF_ASM_PAUSE;
283 			reg_cu &= ~IXGBE_TAF_SYM_PAUSE;
284 		}
285 		break;
286 	case ixgbe_fc_rx_pause:
287 		/*
288 		 * Rx Flow control is enabled and Tx Flow control is
289 		 * disabled by software override. Since there really
290 		 * isn't a way to advertise that we are capable of RX
291 		 * Pause ONLY, we will advertise that we support both
292 		 * symmetric and asymmetric Rx PAUSE, as such we fall
293 		 * through to the fc_full statement.  Later, we will
294 		 * disable the adapter's ability to send PAUSE frames.
295 		 */
296 	case ixgbe_fc_full:
297 		/* Flow control (both Rx and Tx) is enabled by SW override. */
298 		reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE;
299 		if (hw->phy.media_type == ixgbe_media_type_backplane)
300 			reg_bp |= IXGBE_AUTOC_SYM_PAUSE |
301 				  IXGBE_AUTOC_ASM_PAUSE;
302 		else if (hw->phy.media_type == ixgbe_media_type_copper)
303 			reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
304 		break;
305 	default:
306 		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
307 			     "Flow control param set incorrectly\n");
308 		ret_val = IXGBE_ERR_CONFIG;
309 		goto out;
310 		break;
311 	}
312 
313 	if (hw->mac.type != ixgbe_mac_X540) {
314 		/*
315 		 * Enable auto-negotiation between the MAC & PHY;
316 		 * the MAC will advertise clause 37 flow control.
317 		 */
318 		IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
319 		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
320 
321 		/* Disable AN timeout */
322 		if (hw->fc.strict_ieee)
323 			reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
324 
325 		IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
326 		DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
327 	}
328 
329 	/*
330 	 * AUTOC restart handles negotiation of 1G and 10G on backplane
331 	 * and copper. There is no need to set the PCS1GCTL register.
332 	 *
333 	 */
334 	if (hw->phy.media_type == ixgbe_media_type_backplane) {
335 		reg_bp |= IXGBE_AUTOC_AN_RESTART;
336 		/* Need the SW/FW semaphore around AUTOC writes if 82599 and
337 		 * LESM is on, likewise reset_pipeline requires the lock as
338 		 * it also writes AUTOC.
339 		 */
340 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
341 		    ixgbe_verify_lesm_fw_enabled_82599(hw)) {
342 			ret_val = hw->mac.ops.acquire_swfw_sync(hw,
343 							IXGBE_GSSR_MAC_CSR_SM);
344 			if (ret_val != IXGBE_SUCCESS) {
345 				ret_val = IXGBE_ERR_SWFW_SYNC;
346 				goto out;
347 			}
348 			got_lock = TRUE;
349 		}
350 
351 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
352 		if (hw->mac.type == ixgbe_mac_82599EB)
353 			ixgbe_reset_pipeline_82599(hw);
354 
355 		if (got_lock)
356 			hw->mac.ops.release_swfw_sync(hw,
357 						      IXGBE_GSSR_MAC_CSR_SM);
358 	} else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
359 		    (ixgbe_device_supports_autoneg_fc(hw))) {
360 		hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
361 				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu);
362 	}
363 
364 	DEBUGOUT1("Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
365 out:
366 	return ret_val;
367 }
368 
369 /**
370  *  ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
371  *  @hw: pointer to hardware structure
372  *
373  *  Starts the hardware by filling the bus info structure and media type, clearing
374  *  all on-chip counters, initializing the receive address registers, multicast
375  *  table and VLAN filter table, calling the routine to set up link and flow
376  *  control settings, and leaving the transmit and receive units disabled and uninitialized.
377  **/
378 s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
379 {
380 	s32 ret_val;
381 	u32 ctrl_ext;
382 
383 	DEBUGFUNC("ixgbe_start_hw_generic");
384 
385 	/* Set the media type */
386 	hw->phy.media_type = hw->mac.ops.get_media_type(hw);
387 
388 	/* PHY ops initialization must be done in reset_hw() */
389 
390 	/* Clear the VLAN filter table */
391 	hw->mac.ops.clear_vfta(hw);
392 
393 	/* Clear statistics registers */
394 	hw->mac.ops.clear_hw_cntrs(hw);
395 
396 	/* Set No Snoop Disable */
397 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
398 	ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
399 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
400 	IXGBE_WRITE_FLUSH(hw);
401 
402 	/* Setup flow control */
403 	ret_val = ixgbe_setup_fc(hw);
404 	if (ret_val != IXGBE_SUCCESS)
405 		goto out;
406 
407 	/* Clear adapter stopped flag */
408 	hw->adapter_stopped = FALSE;
409 
410 out:
411 	return ret_val;
412 }
413 
414 /**
415  *  ixgbe_start_hw_gen2 - Init sequence for common device family
416  *  @hw: pointer to hw structure
417  *
418  * Performs the init sequence common to the second generation
419  * of 10 GbE devices.
420  * Devices in the second generation:
421  *     82599
422  *     X540
423  **/
424 s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
425 {
426 	u32 i;
427 	u32 regval;
428 
429 	/* Clear the rate limiters */
430 	for (i = 0; i < hw->mac.max_tx_queues; i++) {
431 		IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
432 		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
433 	}
434 	IXGBE_WRITE_FLUSH(hw);
435 
436 	/* Disable relaxed ordering */
437 	for (i = 0; i < hw->mac.max_tx_queues; i++) {
438 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
439 		regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
440 		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
441 	}
442 
443 	for (i = 0; i < hw->mac.max_rx_queues; i++) {
444 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
445 		regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
446 			    IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
447 		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
448 	}
449 
450 	return IXGBE_SUCCESS;
451 }
452 
453 /**
454  *  ixgbe_init_hw_generic - Generic hardware initialization
455  *  @hw: pointer to hardware structure
456  *
457  *  Initializes the hardware by resetting the hardware, filling the bus info
458  *  structure and media type, clearing all on-chip counters, initializing the
459  *  receive address registers, multicast table and VLAN filter table, calling the
460  *  routine to set up link and flow control settings, and leaving the transmit
461  *  and receive units disabled and uninitialized.
462  **/
463 s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
464 {
465 	s32 status;
466 
467 	DEBUGFUNC("ixgbe_init_hw_generic");
468 
469 	/* Reset the hardware */
470 	status = hw->mac.ops.reset_hw(hw);
471 
472 	if (status == IXGBE_SUCCESS) {
473 		/* Start the HW */
474 		status = hw->mac.ops.start_hw(hw);
475 	}
476 
477 	return status;
478 }
479 
480 /**
481  *  ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
482  *  @hw: pointer to hardware structure
483  *
484  *  Clears all hardware statistics counters by reading them from the hardware.
485  *  Statistics counters are cleared on read.
486  **/
487 s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
488 {
489 	u16 i = 0;
490 
491 	DEBUGFUNC("ixgbe_clear_hw_cntrs_generic");
492 
493 	IXGBE_READ_REG(hw, IXGBE_CRCERRS);
494 	IXGBE_READ_REG(hw, IXGBE_ILLERRC);
495 	IXGBE_READ_REG(hw, IXGBE_ERRBC);
496 	IXGBE_READ_REG(hw, IXGBE_MSPDC);
497 	for (i = 0; i < 8; i++)
498 		IXGBE_READ_REG(hw, IXGBE_MPC(i));
499 
500 	IXGBE_READ_REG(hw, IXGBE_MLFC);
501 	IXGBE_READ_REG(hw, IXGBE_MRFC);
502 	IXGBE_READ_REG(hw, IXGBE_RLEC);
503 	IXGBE_READ_REG(hw, IXGBE_LXONTXC);
504 	IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
505 	if (hw->mac.type >= ixgbe_mac_82599EB) {
506 		IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
507 		IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
508 	} else {
509 		IXGBE_READ_REG(hw, IXGBE_LXONRXC);
510 		IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
511 	}
512 
513 	for (i = 0; i < 8; i++) {
514 		IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
515 		IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
516 		if (hw->mac.type >= ixgbe_mac_82599EB) {
517 			IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
518 			IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
519 		} else {
520 			IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
521 			IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
522 		}
523 	}
524 	if (hw->mac.type >= ixgbe_mac_82599EB)
525 		for (i = 0; i < 8; i++)
526 			IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
527 	IXGBE_READ_REG(hw, IXGBE_PRC64);
528 	IXGBE_READ_REG(hw, IXGBE_PRC127);
529 	IXGBE_READ_REG(hw, IXGBE_PRC255);
530 	IXGBE_READ_REG(hw, IXGBE_PRC511);
531 	IXGBE_READ_REG(hw, IXGBE_PRC1023);
532 	IXGBE_READ_REG(hw, IXGBE_PRC1522);
533 	IXGBE_READ_REG(hw, IXGBE_GPRC);
534 	IXGBE_READ_REG(hw, IXGBE_BPRC);
535 	IXGBE_READ_REG(hw, IXGBE_MPRC);
536 	IXGBE_READ_REG(hw, IXGBE_GPTC);
537 	IXGBE_READ_REG(hw, IXGBE_GORCL);
538 	IXGBE_READ_REG(hw, IXGBE_GORCH);
539 	IXGBE_READ_REG(hw, IXGBE_GOTCL);
540 	IXGBE_READ_REG(hw, IXGBE_GOTCH);
541 	if (hw->mac.type == ixgbe_mac_82598EB)
542 		for (i = 0; i < 8; i++)
543 			IXGBE_READ_REG(hw, IXGBE_RNBC(i));
544 	IXGBE_READ_REG(hw, IXGBE_RUC);
545 	IXGBE_READ_REG(hw, IXGBE_RFC);
546 	IXGBE_READ_REG(hw, IXGBE_ROC);
547 	IXGBE_READ_REG(hw, IXGBE_RJC);
548 	IXGBE_READ_REG(hw, IXGBE_MNGPRC);
549 	IXGBE_READ_REG(hw, IXGBE_MNGPDC);
550 	IXGBE_READ_REG(hw, IXGBE_MNGPTC);
551 	IXGBE_READ_REG(hw, IXGBE_TORL);
552 	IXGBE_READ_REG(hw, IXGBE_TORH);
553 	IXGBE_READ_REG(hw, IXGBE_TPR);
554 	IXGBE_READ_REG(hw, IXGBE_TPT);
555 	IXGBE_READ_REG(hw, IXGBE_PTC64);
556 	IXGBE_READ_REG(hw, IXGBE_PTC127);
557 	IXGBE_READ_REG(hw, IXGBE_PTC255);
558 	IXGBE_READ_REG(hw, IXGBE_PTC511);
559 	IXGBE_READ_REG(hw, IXGBE_PTC1023);
560 	IXGBE_READ_REG(hw, IXGBE_PTC1522);
561 	IXGBE_READ_REG(hw, IXGBE_MPTC);
562 	IXGBE_READ_REG(hw, IXGBE_BPTC);
563 	for (i = 0; i < 16; i++) {
564 		IXGBE_READ_REG(hw, IXGBE_QPRC(i));
565 		IXGBE_READ_REG(hw, IXGBE_QPTC(i));
566 		if (hw->mac.type >= ixgbe_mac_82599EB) {
567 			IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
568 			IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
569 			IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
570 			IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
571 			IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
572 		} else {
573 			IXGBE_READ_REG(hw, IXGBE_QBRC(i));
574 			IXGBE_READ_REG(hw, IXGBE_QBTC(i));
575 		}
576 	}
577 
578 	if (hw->mac.type == ixgbe_mac_X540) {
579 		if (hw->phy.id == 0)
580 			ixgbe_identify_phy(hw);
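		/* the X540 PHY error counters below are likewise cleared by reading them */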
581 		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL,
582 				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
583 		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH,
584 				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
585 		hw->phy.ops.read_reg(hw, IXGBE_LDPCECL,
586 				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
587 		hw->phy.ops.read_reg(hw, IXGBE_LDPCECH,
588 				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
589 	}
590 
591 	return IXGBE_SUCCESS;
592 }
593 
594 /**
595  *  ixgbe_read_pba_string_generic - Reads part number string from EEPROM
596  *  @hw: pointer to hardware structure
597  *  @pba_num: stores the part number string from the EEPROM
598  *  @pba_num_size: part number string buffer length
599  *
600  *  Reads the part number string from the EEPROM.
601  **/
602 s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
603 				  u32 pba_num_size)
604 {
605 	s32 ret_val;
606 	u16 data;
607 	u16 pba_ptr;
608 	u16 offset;
609 	u16 length;
610 
611 	DEBUGFUNC("ixgbe_read_pba_string_generic");
612 
613 	if (pba_num == NULL) {
614 		DEBUGOUT("PBA string buffer was null\n");
615 		return IXGBE_ERR_INVALID_ARGUMENT;
616 	}
617 
618 	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
619 	if (ret_val) {
620 		DEBUGOUT("NVM Read Error\n");
621 		return ret_val;
622 	}
623 
624 	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
625 	if (ret_val) {
626 		DEBUGOUT("NVM Read Error\n");
627 		return ret_val;
628 	}
629 
630 	/*
631 	 * If data is not the pointer guard, the PBA must be in legacy format,
632 	 * which means pba_ptr is actually our second data word for the PBA
633 	 * number and we can decode it into an ASCII string
634 	 */
635 	if (data != IXGBE_PBANUM_PTR_GUARD) {
636 		DEBUGOUT("NVM PBA number is not stored as string\n");
637 
638 		/* we will need 11 characters to store the PBA */
639 		if (pba_num_size < 11) {
640 			DEBUGOUT("PBA string buffer too small\n");
641 			return IXGBE_ERR_NO_SPACE;
642 		}
643 
644 		/* extract hex string from data and pba_ptr */
645 		pba_num[0] = (data >> 12) & 0xF;
646 		pba_num[1] = (data >> 8) & 0xF;
647 		pba_num[2] = (data >> 4) & 0xF;
648 		pba_num[3] = data & 0xF;
649 		pba_num[4] = (pba_ptr >> 12) & 0xF;
650 		pba_num[5] = (pba_ptr >> 8) & 0xF;
651 		pba_num[6] = '-';
652 		pba_num[7] = 0;
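		/* index 7 holds a zero nibble; the hex-to-ASCII loop below turns it into '0' */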
653 		pba_num[8] = (pba_ptr >> 4) & 0xF;
654 		pba_num[9] = pba_ptr & 0xF;
655 
656 		/* put a null character on the end of our string */
657 		pba_num[10] = '\0';
658 
659 		/* switch all the data but the '-' to hex char */
660 		for (offset = 0; offset < 10; offset++) {
661 			if (pba_num[offset] < 0xA)
662 				pba_num[offset] += '0';
663 			else if (pba_num[offset] < 0x10)
664 				pba_num[offset] += 'A' - 0xA;
665 		}
666 
667 		return IXGBE_SUCCESS;
668 	}
669 
670 	ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
671 	if (ret_val) {
672 		DEBUGOUT("NVM Read Error\n");
673 		return ret_val;
674 	}
675 
676 	if (length == 0xFFFF || length == 0) {
677 		DEBUGOUT("NVM PBA number section invalid length\n");
678 		return IXGBE_ERR_PBA_SECTION;
679 	}
680 
681 	/* check if pba_num buffer is big enough */
682 	if (pba_num_size  < (((u32)length * 2) - 1)) {
683 		DEBUGOUT("PBA string buffer too small\n");
684 		return IXGBE_ERR_NO_SPACE;
685 	}
686 
687 	/* trim pba length from start of string */
688 	pba_ptr++;
689 	length--;
690 
691 	for (offset = 0; offset < length; offset++) {
692 		ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
693 		if (ret_val) {
694 			DEBUGOUT("NVM Read Error\n");
695 			return ret_val;
696 		}
697 		pba_num[offset * 2] = (u8)(data >> 8);
698 		pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
699 	}
700 	pba_num[offset * 2] = '\0';
701 
702 	return IXGBE_SUCCESS;
703 }
704 
705 /**
706  *  ixgbe_read_pba_num_generic - Reads part number from EEPROM
707  *  @hw: pointer to hardware structure
708  *  @pba_num: stores the part number from the EEPROM
709  *
710  *  Reads the part number from the EEPROM.
711  **/
712 s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num)
713 {
714 	s32 ret_val;
715 	u16 data;
716 
717 	DEBUGFUNC("ixgbe_read_pba_num_generic");
718 
719 	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
720 	if (ret_val) {
721 		DEBUGOUT("NVM Read Error\n");
722 		return ret_val;
723 	} else if (data == IXGBE_PBANUM_PTR_GUARD) {
724 		DEBUGOUT("NVM Not supported\n");
725 		return IXGBE_NOT_IMPLEMENTED;
726 	}
727 	*pba_num = (u32)(data << 16);
728 
729 	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data);
730 	if (ret_val) {
731 		DEBUGOUT("NVM Read Error\n");
732 		return ret_val;
733 	}
734 	*pba_num |= data;
735 
736 	return IXGBE_SUCCESS;
737 }
738 
739 /**
740  *  ixgbe_read_pba_raw
741  *  @hw: pointer to the HW structure
742  *  @eeprom_buf: optional pointer to EEPROM image
743  *  @eeprom_buf_size: size of EEPROM image in words
744  *  @max_pba_block_size: PBA block size limit
745  *  @pba: pointer to output PBA structure
746  *
747  *  Reads PBA from EEPROM image when eeprom_buf is not NULL.
748  *  Reads PBA from physical EEPROM device when eeprom_buf is NULL.
749  *
750  **/
751 s32 ixgbe_read_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
752 		       u32 eeprom_buf_size, u16 max_pba_block_size,
753 		       struct ixgbe_pba *pba)
754 {
755 	s32 ret_val;
756 	u16 pba_block_size;
757 
758 	if (pba == NULL)
759 		return IXGBE_ERR_PARAM;
760 
761 	if (eeprom_buf == NULL) {
762 		ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
763 						     &pba->word[0]);
764 		if (ret_val)
765 			return ret_val;
766 	} else {
767 		if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
768 			pba->word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
769 			pba->word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
770 		} else {
771 			return IXGBE_ERR_PARAM;
772 		}
773 	}
774 
775 	if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
776 		if (pba->pba_block == NULL)
777 			return IXGBE_ERR_PARAM;
778 
779 		ret_val = ixgbe_get_pba_block_size(hw, eeprom_buf,
780 						   eeprom_buf_size,
781 						   &pba_block_size);
782 		if (ret_val)
783 			return ret_val;
784 
785 		if (pba_block_size > max_pba_block_size)
786 			return IXGBE_ERR_PARAM;
787 
788 		if (eeprom_buf == NULL) {
789 			ret_val = hw->eeprom.ops.read_buffer(hw, pba->word[1],
790 							     pba_block_size,
791 							     pba->pba_block);
792 			if (ret_val)
793 				return ret_val;
794 		} else {
795 			if (eeprom_buf_size > (u32)(pba->word[1] +
796 					      pba->pba_block[0])) {
797 				memcpy(pba->pba_block,
798 				       &eeprom_buf[pba->word[1]],
799 				       pba_block_size * sizeof(u16));
800 			} else {
801 				return IXGBE_ERR_PARAM;
802 			}
803 		}
804 	}
805 
806 	return IXGBE_SUCCESS;
807 }
808 
809 /**
810  *  ixgbe_write_pba_raw
811  *  @hw: pointer to the HW structure
812  *  @eeprom_buf: optional pointer to EEPROM image
813  *  @eeprom_buf_size: size of EEPROM image in words
814  *  @pba: pointer to PBA structure
815  *
816  *  Writes PBA to EEPROM image when eeprom_buf is not NULL.
817  *  Writes PBA to physical EEPROM device when eeprom_buf is NULL.
818  *
819  **/
820 s32 ixgbe_write_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
821 			u32 eeprom_buf_size, struct ixgbe_pba *pba)
822 {
823 	s32 ret_val;
824 
825 	if (pba == NULL)
826 		return IXGBE_ERR_PARAM;
827 
828 	if (eeprom_buf == NULL) {
829 		ret_val = hw->eeprom.ops.write_buffer(hw, IXGBE_PBANUM0_PTR, 2,
830 						      &pba->word[0]);
831 		if (ret_val)
832 			return ret_val;
833 	} else {
834 		if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
835 			eeprom_buf[IXGBE_PBANUM0_PTR] = pba->word[0];
836 			eeprom_buf[IXGBE_PBANUM1_PTR] = pba->word[1];
837 		} else {
838 			return IXGBE_ERR_PARAM;
839 		}
840 	}
841 
842 	if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
843 		if (pba->pba_block == NULL)
844 			return IXGBE_ERR_PARAM;
845 
846 		if (eeprom_buf == NULL) {
847 			ret_val = hw->eeprom.ops.write_buffer(hw, pba->word[1],
848 							      pba->pba_block[0],
849 							      pba->pba_block);
850 			if (ret_val)
851 				return ret_val;
852 		} else {
853 			if (eeprom_buf_size > (u32)(pba->word[1] +
854 					      pba->pba_block[0])) {
855 				memcpy(&eeprom_buf[pba->word[1]],
856 				       pba->pba_block,
857 				       pba->pba_block[0] * sizeof(u16));
858 			} else {
859 				return IXGBE_ERR_PARAM;
860 			}
861 		}
862 	}
863 
864 	return IXGBE_SUCCESS;
865 }
866 
867 /**
868  *  ixgbe_get_pba_block_size
869  *  @hw: pointer to the HW structure
870  *  @eeprom_buf: optional pointer to EEPROM image
871  *  @eeprom_buf_size: size of EEPROM image in words
872  *  @pba_block_size: pointer to output variable
873  *
874  *  Returns the size of the PBA block in words. Function operates on EEPROM
875  *  image if the eeprom_buf pointer is not NULL otherwise it accesses physical
876  *  EEPROM device.
877  *
878  **/
879 s32 ixgbe_get_pba_block_size(struct ixgbe_hw *hw, u16 *eeprom_buf,
880 			     u32 eeprom_buf_size, u16 *pba_block_size)
881 {
882 	s32 ret_val;
883 	u16 pba_word[2];
884 	u16 length;
885 
886 	DEBUGFUNC("ixgbe_get_pba_block_size");
887 
888 	if (eeprom_buf == NULL) {
889 		ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
890 						     &pba_word[0]);
891 		if (ret_val)
892 			return ret_val;
893 	} else {
894 		if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
895 			pba_word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
896 			pba_word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
897 		} else {
898 			return IXGBE_ERR_PARAM;
899 		}
900 	}
901 
902 	if (pba_word[0] == IXGBE_PBANUM_PTR_GUARD) {
903 		if (eeprom_buf == NULL) {
904 			ret_val = hw->eeprom.ops.read(hw, pba_word[1] + 0,
905 						      &length);
906 			if (ret_val)
907 				return ret_val;
908 		} else {
909 			if (eeprom_buf_size > pba_word[1])
910 				length = eeprom_buf[pba_word[1] + 0];
911 			else
912 				return IXGBE_ERR_PARAM;
913 		}
914 
915 		if (length == 0xFFFF || length == 0)
916 			return IXGBE_ERR_PBA_SECTION;
917 	} else {
918 		/* PBA number in legacy format, there is no PBA Block. */
919 		length = 0;
920 	}
921 
922 	if (pba_block_size != NULL)
923 		*pba_block_size = length;
924 
925 	return IXGBE_SUCCESS;
926 }
927 
928 /**
929  *  ixgbe_get_mac_addr_generic - Generic get MAC address
930  *  @hw: pointer to hardware structure
931  *  @mac_addr: Adapter MAC address
932  *
933  *  Reads the adapter's MAC address from the first Receive Address Register (RAR0).
934  *  A reset of the adapter must be performed prior to calling this function
935  *  in order for the MAC address to have been loaded from the EEPROM into RAR0
936  **/
937 s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
938 {
939 	u32 rar_high;
940 	u32 rar_low;
941 	u16 i;
942 
943 	DEBUGFUNC("ixgbe_get_mac_addr_generic");
944 
945 	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
946 	rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));
947 
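	/* RAL(0) holds the low four bytes of the address, RAH(0) the upper two */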
948 	for (i = 0; i < 4; i++)
949 		mac_addr[i] = (u8)(rar_low >> (i*8));
950 
951 	for (i = 0; i < 2; i++)
952 		mac_addr[i+4] = (u8)(rar_high >> (i*8));
953 
954 	return IXGBE_SUCCESS;
955 }
956 
957 /**
958  *  ixgbe_set_pci_config_data_generic - Generic store PCI bus info
959  *  @hw: pointer to hardware structure
960  *  @link_status: the link status returned by the PCI config space
961  *
962  *  Stores the PCI bus info (speed, width, type) within the ixgbe_hw structure
963  **/
964 void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw, u16 link_status)
965 {
966 	struct ixgbe_mac_info *mac = &hw->mac;
967 
968 	hw->bus.type = ixgbe_bus_type_pci_express;
969 
970 	switch (link_status & IXGBE_PCI_LINK_WIDTH) {
971 	case IXGBE_PCI_LINK_WIDTH_1:
972 		hw->bus.width = ixgbe_bus_width_pcie_x1;
973 		break;
974 	case IXGBE_PCI_LINK_WIDTH_2:
975 		hw->bus.width = ixgbe_bus_width_pcie_x2;
976 		break;
977 	case IXGBE_PCI_LINK_WIDTH_4:
978 		hw->bus.width = ixgbe_bus_width_pcie_x4;
979 		break;
980 	case IXGBE_PCI_LINK_WIDTH_8:
981 		hw->bus.width = ixgbe_bus_width_pcie_x8;
982 		break;
983 	default:
984 		hw->bus.width = ixgbe_bus_width_unknown;
985 		break;
986 	}
987 
988 	switch (link_status & IXGBE_PCI_LINK_SPEED) {
989 	case IXGBE_PCI_LINK_SPEED_2500:
990 		hw->bus.speed = ixgbe_bus_speed_2500;
991 		break;
992 	case IXGBE_PCI_LINK_SPEED_5000:
993 		hw->bus.speed = ixgbe_bus_speed_5000;
994 		break;
995 	case IXGBE_PCI_LINK_SPEED_8000:
996 		hw->bus.speed = ixgbe_bus_speed_8000;
997 		break;
998 	default:
999 		hw->bus.speed = ixgbe_bus_speed_unknown;
1000 		break;
1001 	}
1002 
1003 	mac->ops.set_lan_id(hw);
1004 }
1005 
1006 /**
1007  *  ixgbe_get_bus_info_generic - Generic get PCI bus info
1008  *  @hw: pointer to hardware structure
1009  *
1010  *  Gets the PCI bus info (speed, width, type) then calls helper function to
1011  *  store this data within the ixgbe_hw structure.
1012  **/
1013 s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
1014 {
1015 	u16 link_status;
1016 
1017 	DEBUGFUNC("ixgbe_get_bus_info_generic");
1018 
1019 	/* Get the negotiated link width and speed from PCI config space */
1020 	link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);
1021 
1022 	ixgbe_set_pci_config_data_generic(hw, link_status);
1023 
1024 	return IXGBE_SUCCESS;
1025 }
1026 
1027 /**
1028  *  ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
1029  *  @hw: pointer to the HW structure
1030  *
1031  *  Determines the LAN function id by reading memory-mapped registers
1032  *  and swaps the port value if requested.
1033  **/
1034 void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
1035 {
1036 	struct ixgbe_bus_info *bus = &hw->bus;
1037 	u32 reg;
1038 
1039 	DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie");
1040 
1041 	reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
1042 	bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
1043 	bus->lan_id = bus->func;
1044 
1045 	/* check for a port swap */
1046 	reg = IXGBE_READ_REG(hw, IXGBE_FACTPS);
1047 	if (reg & IXGBE_FACTPS_LFS)
1048 		bus->func ^= 0x1;
1049 }
1050 
1051 /**
1052  *  ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
1053  *  @hw: pointer to hardware structure
1054  *
1055  *  Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
1056  *  disables transmit and receive units. The adapter_stopped flag is used by
1057  *  the shared code and drivers to determine if the adapter is in a stopped
1058  *  state and should not touch the hardware.
1059  **/
1060 s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
1061 {
1062 	u32 reg_val;
1063 	u16 i;
1064 
1065 	DEBUGFUNC("ixgbe_stop_adapter_generic");
1066 
1067 	/*
1068 	 * Set the adapter_stopped flag so other driver functions stop touching
1069 	 * the hardware
1070 	 */
1071 	hw->adapter_stopped = TRUE;
1072 
1073 	/* Disable the receive unit */
1074 	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, 0);
1075 
1076 	/* Clear interrupt mask to stop interrupts from being generated */
1077 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
1078 
1079 	/* Clear any pending interrupts, flush previous writes */
1080 	IXGBE_READ_REG(hw, IXGBE_EICR);
1081 
1082 	/* Disable the transmit unit.  Each queue must be disabled. */
1083 	for (i = 0; i < hw->mac.max_tx_queues; i++)
1084 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH);
1085 
1086 	/* Disable the receive unit by stopping each queue */
1087 	for (i = 0; i < hw->mac.max_rx_queues; i++) {
1088 		reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
1089 		reg_val &= ~IXGBE_RXDCTL_ENABLE;
1090 		reg_val |= IXGBE_RXDCTL_SWFLSH;
1091 		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
1092 	}
1093 
1094 	/* flush all queue disables */
1095 	IXGBE_WRITE_FLUSH(hw);
1096 	msec_delay(2);
1097 
1098 	/*
1099 	 * Prevent the PCI-E bus from hanging by disabling PCI-E master
1100 	 * access and verify no pending requests
1101 	 */
1102 	return ixgbe_disable_pcie_master(hw);
1103 }
1104 
1105 /**
1106  *  ixgbe_led_on_generic - Turns on the software controllable LEDs.
1107  *  @hw: pointer to hardware structure
1108  *  @index: led number to turn on
1109  **/
1110 s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
1111 {
1112 	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1113 
1114 	DEBUGFUNC("ixgbe_led_on_generic");
1115 
1116 	/* To turn on the LED, set mode to ON. */
1117 	led_reg &= ~IXGBE_LED_MODE_MASK(index);
1118 	led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
1119 	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
1120 	IXGBE_WRITE_FLUSH(hw);
1121 
1122 	return IXGBE_SUCCESS;
1123 }
1124 
1125 /**
1126  *  ixgbe_led_off_generic - Turns off the software controllable LEDs.
1127  *  @hw: pointer to hardware structure
1128  *  @index: led number to turn off
1129  **/
1130 s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
1131 {
1132 	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1133 
1134 	DEBUGFUNC("ixgbe_led_off_generic");
1135 
1136 	/* To turn off the LED, set mode to OFF. */
1137 	led_reg &= ~IXGBE_LED_MODE_MASK(index);
1138 	led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
1139 	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
1140 	IXGBE_WRITE_FLUSH(hw);
1141 
1142 	return IXGBE_SUCCESS;
1143 }
1144 
1145 /**
1146  *  ixgbe_init_eeprom_params_generic - Initialize EEPROM params
1147  *  @hw: pointer to hardware structure
1148  *
1149  *  Initializes the EEPROM parameters ixgbe_eeprom_info within the
1150  *  ixgbe_hw struct in order to set up EEPROM access.
1151  **/
1152 s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
1153 {
1154 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
1155 	u32 eec;
1156 	u16 eeprom_size;
1157 
1158 	DEBUGFUNC("ixgbe_init_eeprom_params_generic");
1159 
1160 	if (eeprom->type == ixgbe_eeprom_uninitialized) {
1161 		eeprom->type = ixgbe_eeprom_none;
1162 		/* Set default semaphore delay to 10ms which is a well
1163 		 * tested value */
1164 		eeprom->semaphore_delay = 10;
1165 		/* Clear EEPROM page size, it will be initialized as needed */
1166 		eeprom->word_page_size = 0;
1167 
1168 		/*
1169 		 * Check for EEPROM present first.
1170 		 * If not present leave as none
1171 		 */
1172 		eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1173 		if (eec & IXGBE_EEC_PRES) {
1174 			eeprom->type = ixgbe_eeprom_spi;
1175 
1176 			/*
1177 			 * SPI EEPROM is assumed here.  This code would need to
1178 			 * change if a future EEPROM is not SPI.
1179 			 */
1180 			eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
1181 					    IXGBE_EEC_SIZE_SHIFT);
1182 			eeprom->word_size = 1 << (eeprom_size +
1183 					     IXGBE_EEPROM_WORD_SIZE_SHIFT);
1184 		}
1185 
1186 		if (eec & IXGBE_EEC_ADDR_SIZE)
1187 			eeprom->address_bits = 16;
1188 		else
1189 			eeprom->address_bits = 8;
1190 		DEBUGOUT3("Eeprom params: type = %d, size = %d, address bits: "
1191 			  "%d\n", eeprom->type, eeprom->word_size,
1192 			  eeprom->address_bits);
1193 	}
1194 
1195 	return IXGBE_SUCCESS;
1196 }
1197 
1198 /**
1199  *  ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang
1200  *  @hw: pointer to hardware structure
1201  *  @offset: offset within the EEPROM to write
1202  *  @words: number of word(s)
1203  *  @data: 16 bit word(s) to write to EEPROM
1204  *
1205  *  Writes 16 bit word(s) to EEPROM through the bit-bang method
1206  **/
1207 s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1208 					       u16 words, u16 *data)
1209 {
1210 	s32 status = IXGBE_SUCCESS;
1211 	u16 i, count;
1212 
1213 	DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang_generic");
1214 
1215 	hw->eeprom.ops.init_params(hw);
1216 
1217 	if (words == 0) {
1218 		status = IXGBE_ERR_INVALID_ARGUMENT;
1219 		goto out;
1220 	}
1221 
1222 	if (offset + words > hw->eeprom.word_size) {
1223 		status = IXGBE_ERR_EEPROM;
1224 		goto out;
1225 	}
1226 
1227 	/*
1228 	 * The EEPROM page size cannot be queried from the chip. We do lazy
1229 	 * initialization. It is only worth doing when we write a large buffer.
1230 	 */
1231 	if ((hw->eeprom.word_page_size == 0) &&
1232 	    (words > IXGBE_EEPROM_PAGE_SIZE_MAX))
1233 		ixgbe_detect_eeprom_page_size_generic(hw, offset);
1234 
1235 	/*
1236 	 * We cannot hold synchronization semaphores for too long
1237 	 * to avoid other entity starvation. However it is more efficient
1238 	 * to write in bursts than to synchronize access for each word.
1239 	 */
1240 	for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
1241 		count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
1242 			IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
1243 		status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i,
1244 							    count, &data[i]);
1245 
1246 		if (status != IXGBE_SUCCESS)
1247 			break;
1248 	}
1249 
1250 out:
1251 	return status;
1252 }
1253 
1254 /**
1255  *  ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM
1256  *  @hw: pointer to hardware structure
1257  *  @offset: offset within the EEPROM to be written to
1258  *  @words: number of word(s)
1259  *  @data: 16 bit word(s) to be written to the EEPROM
1260  *
1261  *  If ixgbe_eeprom_update_checksum is not called after this function, the
1262  *  EEPROM will most likely contain an invalid checksum.
1263  **/
1264 static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
1265 					      u16 words, u16 *data)
1266 {
1267 	s32 status;
1268 	u16 word;
1269 	u16 page_size;
1270 	u16 i;
1271 	u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;
1272 
1273 	DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang");
1274 
1275 	/* Prepare the EEPROM for writing  */
1276 	status = ixgbe_acquire_eeprom(hw);
1277 
1278 	if (status == IXGBE_SUCCESS) {
1279 		if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
1280 			ixgbe_release_eeprom(hw);
1281 			status = IXGBE_ERR_EEPROM;
1282 		}
1283 	}
1284 
1285 	if (status == IXGBE_SUCCESS) {
1286 		for (i = 0; i < words; i++) {
1287 			ixgbe_standby_eeprom(hw);
1288 
1289 			/*  Send the WRITE ENABLE command (8 bit opcode )  */
1290 			ixgbe_shift_out_eeprom_bits(hw,
1291 						   IXGBE_EEPROM_WREN_OPCODE_SPI,
1292 						   IXGBE_EEPROM_OPCODE_BITS);
1293 
1294 			ixgbe_standby_eeprom(hw);
1295 
1296 			/*
1297 			 * Some SPI eeproms use the 8th address bit embedded
1298 			 * in the opcode
1299 			 */
1300 			if ((hw->eeprom.address_bits == 8) &&
1301 			    ((offset + i) >= 128))
1302 				write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
1303 
1304 			/* Send the Write command (8-bit opcode + addr) */
1305 			ixgbe_shift_out_eeprom_bits(hw, write_opcode,
1306 						    IXGBE_EEPROM_OPCODE_BITS);
1307 			ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
1308 						    hw->eeprom.address_bits);
1309 
1310 			page_size = hw->eeprom.word_page_size;
1311 
1312 			/* Send the data in burst via SPI*/
1313 			do {
1314 				word = data[i];
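				/* byte-swap the word before shifting it out; the read path swaps back */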
1315 				word = (word >> 8) | (word << 8);
1316 				ixgbe_shift_out_eeprom_bits(hw, word, 16);
1317 
1318 				if (page_size == 0)
1319 					break;
1320 
1321 				/* do not wrap around page */
1322 				if (((offset + i) & (page_size - 1)) ==
1323 				    (page_size - 1))
1324 					break;
1325 			} while (++i < words);
1326 
1327 			ixgbe_standby_eeprom(hw);
1328 			msec_delay(10);
1329 		}
1330 		/* Done with writing - release the EEPROM */
1331 		ixgbe_release_eeprom(hw);
1332 	}
1333 
1334 	return status;
1335 }
1336 
1337 /**
1338  *  ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
1339  *  @hw: pointer to hardware structure
1340  *  @offset: offset within the EEPROM to be written to
1341  *  @data: 16 bit word to be written to the EEPROM
1342  *
1343  *  If ixgbe_eeprom_update_checksum is not called after this function, the
1344  *  EEPROM will most likely contain an invalid checksum.
1345  **/
1346 s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
1347 {
1348 	s32 status;
1349 
1350 	DEBUGFUNC("ixgbe_write_eeprom_generic");
1351 
1352 	hw->eeprom.ops.init_params(hw);
1353 
1354 	if (offset >= hw->eeprom.word_size) {
1355 		status = IXGBE_ERR_EEPROM;
1356 		goto out;
1357 	}
1358 
1359 	status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
1360 
1361 out:
1362 	return status;
1363 }
1364 
1365 /**
1366  *  ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang
1367  *  @hw: pointer to hardware structure
1368  *  @offset: offset within the EEPROM to be read
1369  *  @data: read 16 bit word(s) from EEPROM
1370  *  @words: number of word(s)
1371  *
1372  *  Reads 16 bit word(s) from EEPROM through bit-bang method
1373  **/
1374 s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1375 					      u16 words, u16 *data)
1376 {
1377 	s32 status = IXGBE_SUCCESS;
1378 	u16 i, count;
1379 
1380 	DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang_generic");
1381 
1382 	hw->eeprom.ops.init_params(hw);
1383 
1384 	if (words == 0) {
1385 		status = IXGBE_ERR_INVALID_ARGUMENT;
1386 		goto out;
1387 	}
1388 
1389 	if (offset + words > hw->eeprom.word_size) {
1390 		status = IXGBE_ERR_EEPROM;
1391 		goto out;
1392 	}
1393 
1394 	/*
1395 	 * We cannot hold synchronization semaphores for too long
1396 	 * to avoid other entity starvation. However it is more efficient
1397 	 * to read in bursts than synchronizing access for each word.
1398 	 */
1399 	for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
1400 		count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
1401 			IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
1402 
1403 		status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i,
1404 							   count, &data[i]);
1405 
1406 		if (status != IXGBE_SUCCESS)
1407 			break;
1408 	}
1409 
1410 out:
1411 	return status;
1412 }
1413 
1414 /**
1415  *  ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang
1416  *  @hw: pointer to hardware structure
1417  *  @offset: offset within the EEPROM to be read
1418  *  @words: number of word(s)
1419  *  @data: read 16 bit word(s) from EEPROM
1420  *
1421  *  Reads 16 bit word(s) from EEPROM through bit-bang method
1422  **/
1423 static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
1424 					     u16 words, u16 *data)
1425 {
1426 	s32 status;
1427 	u16 word_in;
1428 	u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
1429 	u16 i;
1430 
1431 	DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang");
1432 
1433 	/* Prepare the EEPROM for reading  */
1434 	status = ixgbe_acquire_eeprom(hw);
1435 
1436 	if (status == IXGBE_SUCCESS) {
1437 		if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
1438 			ixgbe_release_eeprom(hw);
1439 			status = IXGBE_ERR_EEPROM;
1440 		}
1441 	}
1442 
1443 	if (status == IXGBE_SUCCESS) {
1444 		for (i = 0; i < words; i++) {
1445 			ixgbe_standby_eeprom(hw);
1446 			/*
1447 			 * Some SPI eeproms use the 8th address bit embedded
1448 			 * in the opcode
1449 			 */
1450 			if ((hw->eeprom.address_bits == 8) &&
1451 			    ((offset + i) >= 128))
1452 				read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
1453 
1454 			/* Send the READ command (opcode + addr) */
1455 			ixgbe_shift_out_eeprom_bits(hw, read_opcode,
1456 						    IXGBE_EEPROM_OPCODE_BITS);
1457 			ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
1458 						    hw->eeprom.address_bits);
1459 
1460 			/* Read the data. */
1461 			word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
1462 			data[i] = (word_in >> 8) | (word_in << 8);
1463 		}
1464 
1465 		/* End this read operation */
1466 		ixgbe_release_eeprom(hw);
1467 	}
1468 
1469 	return status;
1470 }
1471 
1472 /**
1473  *  ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
1474  *  @hw: pointer to hardware structure
1475  *  @offset: offset within the EEPROM to be read
1476  *  @data: read 16 bit value from EEPROM
1477  *
1478  *  Reads 16 bit value from EEPROM through bit-bang method
1479  **/
1480 s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1481 				       u16 *data)
1482 {
1483 	s32 status;
1484 
1485 	DEBUGFUNC("ixgbe_read_eeprom_bit_bang_generic");
1486 
1487 	hw->eeprom.ops.init_params(hw);
1488 
1489 	if (offset >= hw->eeprom.word_size) {
1490 		status = IXGBE_ERR_EEPROM;
1491 		goto out;
1492 	}
1493 
1494 	status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1495 
1496 out:
1497 	return status;
1498 }
1499 
1500 /**
1501  *  ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD
1502  *  @hw: pointer to hardware structure
1503  *  @offset: offset of word in the EEPROM to read
1504  *  @words: number of word(s)
1505  *  @data: 16 bit word(s) from the EEPROM
1506  *
1507  *  Reads 16 bit word(s) from the EEPROM using the EERD register.
1508  **/
1509 s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1510 				   u16 words, u16 *data)
1511 {
1512 	u32 eerd;
1513 	s32 status = IXGBE_SUCCESS;
1514 	u32 i;
1515 
1516 	DEBUGFUNC("ixgbe_read_eerd_buffer_generic");
1517 
1518 	hw->eeprom.ops.init_params(hw);
1519 
1520 	if (words == 0) {
1521 		status = IXGBE_ERR_INVALID_ARGUMENT;
1522 		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
1523 		goto out;
1524 	}
1525 
1526 	if (offset >= hw->eeprom.word_size) {
1527 		status = IXGBE_ERR_EEPROM;
1528 		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
1529 		goto out;
1530 	}
1531 
1532 	for (i = 0; i < words; i++) {
1533 		eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1534 		       IXGBE_EEPROM_RW_REG_START;
1535 
1536 		IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
1537 		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);
1538 
1539 		if (status == IXGBE_SUCCESS) {
1540 			data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
1541 				   IXGBE_EEPROM_RW_REG_DATA);
1542 		} else {
1543 			DEBUGOUT("Eeprom read timed out\n");
1544 			goto out;
1545 		}
1546 	}
1547 out:
1548 	return status;
1549 }
1550 
1551 /**
1552  *  ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size
1553  *  @hw: pointer to hardware structure
1554  *  @offset: offset within the EEPROM to be used as a scratch pad
1555  *
1556  *  Discover the EEPROM page size by writing marching data at the given offset.
1557  *  This function is called only when we are writing a new large buffer
1558  *  at given offset so the data would be overwritten anyway.
1559  **/
1560 static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
1561 						 u16 offset)
1562 {
1563 	u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
1564 	s32 status = IXGBE_SUCCESS;
1565 	u16 i;
1566 
1567 	DEBUGFUNC("ixgbe_detect_eeprom_page_size_generic");
1568 
1569 	for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
1570 		data[i] = i;
1571 
1572 	hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX;
1573 	status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset,
1574 					     IXGBE_EEPROM_PAGE_SIZE_MAX, data);
1575 	hw->eeprom.word_page_size = 0;
1576 	if (status != IXGBE_SUCCESS)
1577 		goto out;
1578 
1579 	status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1580 	if (status != IXGBE_SUCCESS)
1581 		goto out;
1582 
1583 	/*
1584 	 * When writing a burst larger than the actual page size, the EEPROM
1585 	 * address wraps around the current page, so the word read back above reveals the page size.
1586 	 */
1587 	hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];
1588 
1589 	DEBUGOUT1("Detected EEPROM page size = %d words.",
1590 		  hw->eeprom.word_page_size);
1591 out:
1592 	return status;
1593 }
1594 
1595 /**
1596  *  ixgbe_read_eerd_generic - Read EEPROM word using EERD
1597  *  @hw: pointer to hardware structure
1598  *  @offset: offset of word in the EEPROM to read
1599  *  @data: word read from the EEPROM
1600  *
1601  *  Reads a 16 bit word from the EEPROM using the EERD register.
1602  **/
1603 s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
1604 {
1605 	return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
1606 }
1607 
1608 /**
1609  *  ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR
1610  *  @hw: pointer to hardware structure
1611  *  @offset: offset of word in the EEPROM to write
1612  *  @words: number of word(s)
1613  *  @data: word(s) to write to the EEPROM
1614  *
1615  *  Writes 16 bit word(s) to the EEPROM using the EEWR register.
1616  **/
1617 s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1618 				    u16 words, u16 *data)
1619 {
1620 	u32 eewr;
1621 	s32 status = IXGBE_SUCCESS;
1622 	u16 i;
1623 
1624 	DEBUGFUNC("ixgbe_write_eewr_buffer_generic");
1625 
1626 	hw->eeprom.ops.init_params(hw);
1627 
1628 	if (words == 0) {
1629 		status = IXGBE_ERR_INVALID_ARGUMENT;
1630 		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
1631 		goto out;
1632 	}
1633 
1634 	if (offset >= hw->eeprom.word_size) {
1635 		status = IXGBE_ERR_EEPROM;
1636 		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
1637 		goto out;
1638 	}
1639 
1640 	for (i = 0; i < words; i++) {
1641 		eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1642 			(data[i] << IXGBE_EEPROM_RW_REG_DATA) |
1643 			IXGBE_EEPROM_RW_REG_START;
1644 
1645 		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1646 		if (status != IXGBE_SUCCESS) {
1647 			DEBUGOUT("Eeprom write EEWR timed out\n");
1648 			goto out;
1649 		}
1650 
1651 		IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
1652 
1653 		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1654 		if (status != IXGBE_SUCCESS) {
1655 			DEBUGOUT("Eeprom write EEWR timed out\n");
1656 			goto out;
1657 		}
1658 	}
1659 
1660 out:
1661 	return status;
1662 }
1663 
1664 /**
1665  *  ixgbe_write_eewr_generic - Write EEPROM word using EEWR
1666  *  @hw: pointer to hardware structure
1667  *  @offset: offset of word in the EEPROM to write
1668  *  @data: word to write to the EEPROM
1669  *
1670  *  Writes a 16 bit word to the EEPROM using the EEWR register.
1671  **/
1672 s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
1673 {
1674 	return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data);
1675 }
1676 
1677 /**
1678  *  ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
1679  *  @hw: pointer to hardware structure
1680  *  @ee_reg: EEPROM flag for polling
1681  *
1682  *  Polls the status bit (bit 1) of the EERD or EEWR to determine when the
1683  *  read or write is done respectively.
1684  **/
1685 s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
1686 {
1687 	u32 i;
1688 	u32 reg;
1689 	s32 status = IXGBE_ERR_EEPROM;
1690 
1691 	DEBUGFUNC("ixgbe_poll_eerd_eewr_done");
1692 
1693 	for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
1694 		if (ee_reg == IXGBE_NVM_POLL_READ)
1695 			reg = IXGBE_READ_REG(hw, IXGBE_EERD);
1696 		else
1697 			reg = IXGBE_READ_REG(hw, IXGBE_EEWR);
1698 
1699 		if (reg & IXGBE_EEPROM_RW_REG_DONE) {
1700 			status = IXGBE_SUCCESS;
1701 			break;
1702 		}
1703 		usec_delay(5);
1704 	}
1705 
1706 	if (i == IXGBE_EERD_EEWR_ATTEMPTS)
1707 		ERROR_REPORT1(IXGBE_ERROR_POLLING,
1708 			     "EEPROM read/write done polling timed out");
1709 
1710 	return status;
1711 }
1712 
1713 /**
1714  *  ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
1715  *  @hw: pointer to hardware structure
1716  *
1717  *  Prepares EEPROM for access using bit-bang method. This function should
1718  *  be called before issuing a command to the EEPROM.
1719  **/
1720 static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
1721 {
1722 	s32 status = IXGBE_SUCCESS;
1723 	u32 eec;
1724 	u32 i;
1725 
1726 	DEBUGFUNC("ixgbe_acquire_eeprom");
1727 
1728 	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)
1729 	    != IXGBE_SUCCESS)
1730 		status = IXGBE_ERR_SWFW_SYNC;
1731 
1732 	if (status == IXGBE_SUCCESS) {
1733 		eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1734 
1735 		/* Request EEPROM Access */
1736 		eec |= IXGBE_EEC_REQ;
1737 		IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1738 
1739 		for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
1740 			eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1741 			if (eec & IXGBE_EEC_GNT)
1742 				break;
1743 			usec_delay(5);
1744 		}
1745 
1746 		/* Release if grant not acquired */
1747 		if (!(eec & IXGBE_EEC_GNT)) {
1748 			eec &= ~IXGBE_EEC_REQ;
1749 			IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1750 			DEBUGOUT("Could not acquire EEPROM grant\n");
1751 
1752 			hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1753 			status = IXGBE_ERR_EEPROM;
1754 		}
1755 
1756 		/* Setup EEPROM for Read/Write */
1757 		if (status == IXGBE_SUCCESS) {
1758 			/* Clear CS and SK */
1759 			eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
1760 			IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1761 			IXGBE_WRITE_FLUSH(hw);
1762 			usec_delay(1);
1763 		}
1764 	}
1765 	return status;
1766 }
1767 
1768 /**
1769  *  ixgbe_get_eeprom_semaphore - Get hardware semaphore
1770  *  @hw: pointer to hardware structure
1771  *
1772  *  Sets the hardware semaphores so EEPROM access can occur for bit-bang method
1773  **/
1774 static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
1775 {
1776 	s32 status = IXGBE_ERR_EEPROM;
1777 	u32 timeout = 2000;
1778 	u32 i;
1779 	u32 swsm;
1780 
1781 	DEBUGFUNC("ixgbe_get_eeprom_semaphore");
1782 
1783 
1784 	/* Get SMBI software semaphore between device drivers first */
1785 	for (i = 0; i < timeout; i++) {
1786 		/*
1787 		 * If the SMBI bit is 0 when we read it, then the bit will be
1788 		 * set and we have the semaphore
1789 		 */
1790 		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1791 		if (!(swsm & IXGBE_SWSM_SMBI)) {
1792 			status = IXGBE_SUCCESS;
1793 			break;
1794 		}
1795 		usec_delay(50);
1796 	}
1797 
1798 	if (i == timeout) {
1799 		DEBUGOUT("Driver can't access the EEPROM - SMBI Semaphore "
1800 			 "not granted.\n");
1801 		/*
1802 		 * this release is particularly important because our attempts
1803 		 * above to get the semaphore may have succeeded, and if there
1804 		 * was a timeout, we should unconditionally clear the semaphore
1805 		 * bits to free the driver to make progress
1806 		 */
1807 		ixgbe_release_eeprom_semaphore(hw);
1808 
1809 		usec_delay(50);
1810 		/*
1811 		 * one last try
1812 		 * If the SMBI bit is 0 when we read it, then the bit will be
1813 		 * set and we have the semaphore
1814 		 */
1815 		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1816 		if (!(swsm & IXGBE_SWSM_SMBI))
1817 			status = IXGBE_SUCCESS;
1818 	}
1819 
1820 	/* Now get the semaphore between SW/FW through the SWESMBI bit */
1821 	if (status == IXGBE_SUCCESS) {
1822 		for (i = 0; i < timeout; i++) {
1823 			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1824 
1825 			/* Set the SW EEPROM semaphore bit to request access */
1826 			swsm |= IXGBE_SWSM_SWESMBI;
1827 			IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
1828 
1829 			/*
1830 			 * If we set the bit successfully then we got the
1831 			 * semaphore.
1832 			 */
1833 			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1834 			if (swsm & IXGBE_SWSM_SWESMBI)
1835 				break;
1836 
1837 			usec_delay(50);
1838 		}
1839 
1840 		/*
1841 		 * Release semaphores and return error if SW EEPROM semaphore
1842 		 * was not granted because we don't have access to the EEPROM
1843 		 */
1844 		if (i >= timeout) {
1845 			ERROR_REPORT1(IXGBE_ERROR_POLLING,
1846 			    "SWESMBI Software EEPROM semaphore not granted.\n");
1847 			ixgbe_release_eeprom_semaphore(hw);
1848 			status = IXGBE_ERR_EEPROM;
1849 		}
1850 	} else {
1851 		ERROR_REPORT1(IXGBE_ERROR_POLLING,
1852 			     "Software semaphore SMBI between device drivers "
1853 			     "not granted.\n");
1854 	}
1855 
1856 	return status;
1857 }
1858 
1859 /**
1860  *  ixgbe_release_eeprom_semaphore - Release hardware semaphore
1861  *  @hw: pointer to hardware structure
1862  *
1863  *  This function clears hardware semaphore bits.
1864  **/
1865 static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
1866 {
1867 	u32 swsm;
1868 
1869 	DEBUGFUNC("ixgbe_release_eeprom_semaphore");
1870 
1871 	swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1872 
1873 	/* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
1874 	swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
1875 	IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
1876 	IXGBE_WRITE_FLUSH(hw);
1877 }
1878 
1879 /**
1880  *  ixgbe_ready_eeprom - Polls for EEPROM ready
1881  *  @hw: pointer to hardware structure
1882  **/
1883 static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
1884 {
1885 	s32 status = IXGBE_SUCCESS;
1886 	u16 i;
1887 	u8 spi_stat_reg;
1888 
1889 	DEBUGFUNC("ixgbe_ready_eeprom");
1890 
1891 	/*
1892 	 * Read "Status Register" repeatedly until the LSB is cleared.  The
1893 	 * EEPROM will signal that the command has been completed by clearing
1894 	 * bit 0 of the internal status register.  If it's not cleared within
1895 	 * 5 milliseconds, then error out.
1896 	 */
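	/*
	 * Note: the loop index advances in 5 usec steps to match the
	 * usec_delay(5) below, so IXGBE_EEPROM_MAX_RETRY_SPI is effectively
	 * a microsecond budget rather than an iteration count.
	 */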
1897 	for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
1898 		ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
1899 					    IXGBE_EEPROM_OPCODE_BITS);
1900 		spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
1901 		if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
1902 			break;
1903 
1904 		usec_delay(5);
1905 		ixgbe_standby_eeprom(hw);
1906 	}
1907 
1908 	/*
1909 	 * On some parts, SPI write time could vary from 0-20mSec on 3.3V
1910 	 * devices (and only 0-5mSec on 5V devices)
1911 	 */
1912 	if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
1913 		DEBUGOUT("SPI EEPROM Status error\n");
1914 		status = IXGBE_ERR_EEPROM;
1915 	}
1916 
1917 	return status;
1918 }
1919 
1920 /**
1921  *  ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
1922  *  @hw: pointer to hardware structure
1923  **/
1924 static void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
1925 {
1926 	u32 eec;
1927 
1928 	DEBUGFUNC("ixgbe_standby_eeprom");
1929 
1930 	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1931 
1932 	/* Toggle CS to flush commands */
1933 	eec |= IXGBE_EEC_CS;
1934 	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1935 	IXGBE_WRITE_FLUSH(hw);
1936 	usec_delay(1);
1937 	eec &= ~IXGBE_EEC_CS;
1938 	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1939 	IXGBE_WRITE_FLUSH(hw);
1940 	usec_delay(1);
1941 }
1942 
1943 /**
1944  *  ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
1945  *  @hw: pointer to hardware structure
1946  *  @data: data to send to the EEPROM
1947  *  @count: number of bits to shift out
1948  **/
1949 static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
1950 					u16 count)
1951 {
1952 	u32 eec;
1953 	u32 mask;
1954 	u32 i;
1955 
1956 	DEBUGFUNC("ixgbe_shift_out_eeprom_bits");
1957 
1958 	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1959 
1960 	/*
1961 	 * Mask is used to shift "count" bits of "data" out to the EEPROM
1962 	 * one bit at a time.  Determine the starting bit based on count
1963 	 */
1964 	mask = 0x01 << (count - 1);
1965 
1966 	for (i = 0; i < count; i++) {
1967 		/*
1968 		 * A "1" is shifted out to the EEPROM by setting bit "DI" to a
1969 		 * "1", and then raising and then lowering the clock (the SK
1970 		 * bit controls the clock input to the EEPROM).  A "0" is
1971 		 * shifted out to the EEPROM by setting "DI" to "0" and then
1972 		 * raising and then lowering the clock.
1973 		 */
1974 		if (data & mask)
1975 			eec |= IXGBE_EEC_DI;
1976 		else
1977 			eec &= ~IXGBE_EEC_DI;
1978 
1979 		IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1980 		IXGBE_WRITE_FLUSH(hw);
1981 
1982 		usec_delay(1);
1983 
1984 		ixgbe_raise_eeprom_clk(hw, &eec);
1985 		ixgbe_lower_eeprom_clk(hw, &eec);
1986 
1987 		/*
1988 		 * Shift mask to signify next bit of data to shift in to the
1989 		 * EEPROM
1990 		 */
1991 		mask = mask >> 1;
1992 	}
1993 
1994 	/* We leave the "DI" bit set to "0" when we leave this routine. */
1995 	eec &= ~IXGBE_EEC_DI;
1996 	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1997 	IXGBE_WRITE_FLUSH(hw);
1998 }
1999 
2000 /**
2001  *  ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
2002  *  @hw: pointer to hardware structure
 *  @count: number of bits to shift in
2003  **/
2004 static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
2005 {
2006 	u32 eec;
2007 	u32 i;
2008 	u16 data = 0;
2009 
2010 	DEBUGFUNC("ixgbe_shift_in_eeprom_bits");
2011 
2012 	/*
2013 	 * In order to read a register from the EEPROM, we need to shift
2014 	 * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
2015 	 * the clock input to the EEPROM (setting the SK bit), and then reading
2016 	 * the value of the "DO" bit.  During this "shifting in" process the
2017 	 * "DI" bit should always be clear.
2018 	 */
2019 	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
2020 
2021 	eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);
2022 
2023 	for (i = 0; i < count; i++) {
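		/*
		 * Shift the accumulated value left, clock the EEPROM, then
		 * fold the sampled DO bit in as the new least significant
		 * bit.
		 */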
2024 		data = data << 1;
2025 		ixgbe_raise_eeprom_clk(hw, &eec);
2026 
2027 		eec = IXGBE_READ_REG(hw, IXGBE_EEC);
2028 
2029 		eec &= ~(IXGBE_EEC_DI);
2030 		if (eec & IXGBE_EEC_DO)
2031 			data |= 1;
2032 
2033 		ixgbe_lower_eeprom_clk(hw, &eec);
2034 	}
2035 
2036 	return data;
2037 }
2038 
2039 /**
2040  *  ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
2041  *  @hw: pointer to hardware structure
2042  *  @eec: EEC register's current value
2043  **/
2044 static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
2045 {
2046 	DEBUGFUNC("ixgbe_raise_eeprom_clk");
2047 
2048 	/*
2049 	 * Raise the clock input to the EEPROM
2050 	 * (setting the SK bit), then delay
2051 	 */
2052 	*eec = *eec | IXGBE_EEC_SK;
2053 	IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
2054 	IXGBE_WRITE_FLUSH(hw);
2055 	usec_delay(1);
2056 }
2057 
2058 /**
2059  *  ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
2060  *  @hw: pointer to hardware structure
2061  *  @eec: EEC register's current value
2062  **/
2063 static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
2064 {
2065 	DEBUGFUNC("ixgbe_lower_eeprom_clk");
2066 
2067 	/*
2068 	 * Lower the clock input to the EEPROM (clearing the SK bit), then
2069 	 * delay
2070 	 */
2071 	*eec = *eec & ~IXGBE_EEC_SK;
2072 	IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
2073 	IXGBE_WRITE_FLUSH(hw);
2074 	usec_delay(1);
2075 }
2076 
2077 /**
2078  *  ixgbe_release_eeprom - Release EEPROM, release semaphores
2079  *  @hw: pointer to hardware structure
2080  **/
2081 static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
2082 {
2083 	u32 eec;
2084 
2085 	DEBUGFUNC("ixgbe_release_eeprom");
2086 
2087 	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
2088 
2089 	eec |= IXGBE_EEC_CS;  /* Pull CS high */
2090 	eec &= ~IXGBE_EEC_SK; /* Lower SCK */
2091 
2092 	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
2093 	IXGBE_WRITE_FLUSH(hw);
2094 
2095 	usec_delay(1);
2096 
2097 	/* Stop requesting EEPROM access */
2098 	eec &= ~IXGBE_EEC_REQ;
2099 	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
2100 
2101 	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
2102 
2103 	/* Delay before attempt to obtain semaphore again to allow FW access */
2104 	msec_delay(hw->eeprom.semaphore_delay);
2105 }
2106 
2107 /**
2108  *  ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
2109  *  @hw: pointer to hardware structure
2110  **/
2111 u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
2112 {
2113 	u16 i;
2114 	u16 j;
2115 	u16 checksum = 0;
2116 	u16 length = 0;
2117 	u16 pointer = 0;
2118 	u16 word = 0;
2119 
2120 	DEBUGFUNC("ixgbe_calc_eeprom_checksum_generic");
2121 
2122 	/* Include 0x0-0x3F in the checksum */
2123 	for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
2124 		if (hw->eeprom.ops.read(hw, i, &word) != IXGBE_SUCCESS) {
2125 			DEBUGOUT("EEPROM read failed\n");
2126 			break;
2127 		}
2128 		checksum += word;
2129 	}
2130 
2131 	/* Include all data from pointers except for the fw pointer */
2132 	for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
2133 		hw->eeprom.ops.read(hw, i, &pointer);
2134 
2135 		/* Make sure the pointer seems valid */
2136 		if (pointer != 0xFFFF && pointer != 0) {
2137 			hw->eeprom.ops.read(hw, pointer, &length);
2138 
2139 			if (length != 0xFFFF && length != 0) {
2140 				for (j = pointer+1; j <= pointer+length; j++) {
2141 					hw->eeprom.ops.read(hw, j, &word);
2142 					checksum += word;
2143 				}
2144 			}
2145 		}
2146 	}
2147 
2148 	checksum = (u16)IXGBE_EEPROM_SUM - checksum;
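	/*
	 * The checksum word is defined so that the sum of all checksummed
	 * words equals IXGBE_EEPROM_SUM; return the value that satisfies
	 * that relationship.
	 */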
2149 
2150 	return checksum;
2151 }
2152 
2153 /**
2154  *  ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
2155  *  @hw: pointer to hardware structure
2156  *  @checksum_val: calculated checksum
2157  *
2158  *  Performs checksum calculation and validates the EEPROM checksum.  If the
2159  *  caller does not need checksum_val, the value can be NULL.
2160  **/
2161 s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
2162 					   u16 *checksum_val)
2163 {
2164 	s32 status;
2165 	u16 checksum;
2166 	u16 read_checksum = 0;
2167 
2168 	DEBUGFUNC("ixgbe_validate_eeprom_checksum_generic");
2169 
2170 	/*
2171 	 * Read the first word from the EEPROM. If this times out or fails, do
2172 	 * not continue or we could be in for a very long wait while every
2173 	 * EEPROM read fails
2174 	 */
2175 	status = hw->eeprom.ops.read(hw, 0, &checksum);
2176 
2177 	if (status == IXGBE_SUCCESS) {
2178 		checksum = hw->eeprom.ops.calc_checksum(hw);
2179 
2180 		hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
2181 
2182 		/*
2183 		 * Verify read checksum from EEPROM is the same as
2184 		 * calculated checksum
2185 		 */
2186 		if (read_checksum != checksum)
2187 			status = IXGBE_ERR_EEPROM_CHECKSUM;
2188 
2189 		/* If the user cares, return the calculated checksum */
2190 		if (checksum_val)
2191 			*checksum_val = checksum;
2192 	} else {
2193 		DEBUGOUT("EEPROM read failed\n");
2194 	}
2195 
2196 	return status;
2197 }
2198 
2199 /**
2200  *  ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
2201  *  @hw: pointer to hardware structure
2202  **/
2203 s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
2204 {
2205 	s32 status;
2206 	u16 checksum;
2207 
2208 	DEBUGFUNC("ixgbe_update_eeprom_checksum_generic");
2209 
2210 	/*
2211 	 * Read the first word from the EEPROM. If this times out or fails, do
2212 	 * not continue or we could be in for a very long wait while every
2213 	 * EEPROM read fails
2214 	 */
2215 	status = hw->eeprom.ops.read(hw, 0, &checksum);
2216 
2217 	if (status == IXGBE_SUCCESS) {
2218 		checksum = hw->eeprom.ops.calc_checksum(hw);
2219 		status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM,
2220 					      checksum);
2221 	} else {
2222 		DEBUGOUT("EEPROM read failed\n");
2223 	}
2224 
2225 	return status;
2226 }
2227 
2228 /**
2229  *  ixgbe_validate_mac_addr - Validate MAC address
2230  *  @mac_addr: pointer to MAC address.
2231  *
2232  *  Tests a MAC address to ensure it is a valid Individual Address
2233  **/
2234 s32 ixgbe_validate_mac_addr(u8 *mac_addr)
2235 {
2236 	s32 status = IXGBE_SUCCESS;
2237 
2238 	DEBUGFUNC("ixgbe_validate_mac_addr");
2239 
2240 	/* Make sure it is not a multicast address */
2241 	if (IXGBE_IS_MULTICAST(mac_addr)) {
2242 		DEBUGOUT("MAC address is multicast\n");
2243 		status = IXGBE_ERR_INVALID_MAC_ADDR;
2244 	/* Not a broadcast address */
2245 	} else if (IXGBE_IS_BROADCAST(mac_addr)) {
2246 		DEBUGOUT("MAC address is broadcast\n");
2247 		status = IXGBE_ERR_INVALID_MAC_ADDR;
2248 	/* Reject the zero address */
2249 	} else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
2250 		   mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
2251 		DEBUGOUT("MAC address is all zeros\n");
2252 		status = IXGBE_ERR_INVALID_MAC_ADDR;
2253 	}
2254 	return status;
2255 }
2256 
2257 /**
2258  *  ixgbe_set_rar_generic - Set Rx address register
2259  *  @hw: pointer to hardware structure
2260  *  @index: Receive address register to write
2261  *  @addr: Address to put into receive address register
2262  *  @vmdq: VMDq "set" or "pool" index
2263  *  @enable_addr: set flag that address is active
2264  *
2265  *  Puts an ethernet address into a receive address register.
2266  **/
2267 s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
2268 			  u32 enable_addr)
2269 {
2270 	u32 rar_low, rar_high;
2271 	u32 rar_entries = hw->mac.num_rar_entries;
2272 
2273 	DEBUGFUNC("ixgbe_set_rar_generic");
2274 
2275 	/* Make sure we are using a valid rar index range */
2276 	if (index >= rar_entries) {
2277 		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
2278 			     "RAR index %d is out of range.\n", index);
2279 		return IXGBE_ERR_INVALID_ARGUMENT;
2280 	}
2281 
2282 	/* setup VMDq pool selection before this RAR gets enabled */
2283 	hw->mac.ops.set_vmdq(hw, index, vmdq);
2284 
2285 	/*
2286 	 * HW expects these in little endian so we reverse the byte
2287 	 * order from network order (big endian) to little endian
2288 	 */
2289 	rar_low = ((u32)addr[0] |
2290 		   ((u32)addr[1] << 8) |
2291 		   ((u32)addr[2] << 16) |
2292 		   ((u32)addr[3] << 24));
2293 	/*
2294 	 * Some parts put the VMDq setting in the extra RAH bits,
2295 	 * so save everything except the lower 16 bits that hold part
2296 	 * of the address and the address valid bit.
2297 	 */
2298 	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
2299 	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
2300 	rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
2301 
2302 	if (enable_addr != 0)
2303 		rar_high |= IXGBE_RAH_AV;
2304 
2305 	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
2306 	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
2307 
2308 	return IXGBE_SUCCESS;
2309 }
2310 
2311 /**
2312  *  ixgbe_clear_rar_generic - Remove Rx address register
2313  *  @hw: pointer to hardware structure
2314  *  @index: Receive address register to write
2315  *
2316  *  Clears an ethernet address from a receive address register.
2317  **/
2318 s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
2319 {
2320 	u32 rar_high;
2321 	u32 rar_entries = hw->mac.num_rar_entries;
2322 
2323 	DEBUGFUNC("ixgbe_clear_rar_generic");
2324 
2325 	/* Make sure we are using a valid rar index range */
2326 	if (index >= rar_entries) {
2327 		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
2328 			     "RAR index %d is out of range.\n", index);
2329 		return IXGBE_ERR_INVALID_ARGUMENT;
2330 	}
2331 
2332 	/*
2333 	 * Some parts put the VMDq setting in the extra RAH bits,
2334 	 * so save everything except the lower 16 bits that hold part
2335 	 * of the address and the address valid bit.
2336 	 */
2337 	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
2338 	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
2339 
2340 	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
2341 	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
2342 
2343 	/* clear VMDq pool/queue selection for this RAR */
2344 	hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
2345 
2346 	return IXGBE_SUCCESS;
2347 }
2348 
2349 /**
2350  *  ixgbe_init_rx_addrs_generic - Initializes receive address filters.
2351  *  @hw: pointer to hardware structure
2352  *
2353  *  Places the MAC address in receive address register 0 and clears the rest
2354  *  of the receive address registers. Clears the multicast table. Assumes
2355  *  the receiver is in reset when the routine is called.
2356  **/
2357 s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
2358 {
2359 	u32 i;
2360 	u32 rar_entries = hw->mac.num_rar_entries;
2361 
2362 	DEBUGFUNC("ixgbe_init_rx_addrs_generic");
2363 
2364 	/*
2365 	 * If the current mac address is valid, assume it is a software override
2366 	 * to the permanent address.
2367 	 * Otherwise, use the permanent address from the eeprom.
2368 	 */
2369 	if (ixgbe_validate_mac_addr(hw->mac.addr) ==
2370 	    IXGBE_ERR_INVALID_MAC_ADDR) {
2371 		/* Get the MAC address from the RAR0 for later reference */
2372 		hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
2373 
2374 		DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
2375 			  hw->mac.addr[0], hw->mac.addr[1],
2376 			  hw->mac.addr[2]);
2377 		DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
2378 			  hw->mac.addr[4], hw->mac.addr[5]);
2379 	} else {
2380 		/* Setup the receive address. */
2381 		DEBUGOUT("Overriding MAC Address in RAR[0]\n");
2382 		DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ",
2383 			  hw->mac.addr[0], hw->mac.addr[1],
2384 			  hw->mac.addr[2]);
2385 		DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
2386 			  hw->mac.addr[4], hw->mac.addr[5]);
2387 
2388 		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
2389 
2390 		/* clear VMDq pool/queue selection for RAR 0 */
2391 		hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
2392 	}
2393 	hw->addr_ctrl.overflow_promisc = 0;
2394 
2395 	hw->addr_ctrl.rar_used_count = 1;
2396 
2397 	/* Zero out the other receive addresses. */
2398 	DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1);
2399 	for (i = 1; i < rar_entries; i++) {
2400 		IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
2401 		IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
2402 	}
2403 
2404 	/* Clear the MTA */
2405 	hw->addr_ctrl.mta_in_use = 0;
2406 	IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
2407 
2408 	DEBUGOUT(" Clearing MTA\n");
2409 	for (i = 0; i < hw->mac.mcft_size; i++)
2410 		IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
2411 
2412 	ixgbe_init_uta_tables(hw);
2413 
2414 	return IXGBE_SUCCESS;
2415 }
2416 
2417 /**
2418  *  ixgbe_add_uc_addr - Adds a secondary unicast address.
2419  *  @hw: pointer to hardware structure
2420  *  @addr: new address
 *  @vmdq: VMDq pool index
2421  *
2422  *  Adds it to an unused receive address register or goes into promiscuous mode.
2423  **/
2424 void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
2425 {
2426 	u32 rar_entries = hw->mac.num_rar_entries;
2427 	u32 rar;
2428 
2429 	DEBUGFUNC("ixgbe_add_uc_addr");
2430 
2431 	DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
2432 		  addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
2433 
2434 	/*
2435 	 * Place this address in the RAR if there is room,
2436 	 * else put the controller into promiscuous mode
2437 	 */
2438 	if (hw->addr_ctrl.rar_used_count < rar_entries) {
2439 		rar = hw->addr_ctrl.rar_used_count;
2440 		hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
2441 		DEBUGOUT1("Added a secondary address to RAR[%d]\n", rar);
2442 		hw->addr_ctrl.rar_used_count++;
2443 	} else {
2444 		hw->addr_ctrl.overflow_promisc++;
2445 	}
2446 
2447 	DEBUGOUT("ixgbe_add_uc_addr Complete\n");
2448 }
2449 
2450 /**
2451  *  ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
2452  *  @hw: pointer to hardware structure
2453  *  @addr_list: the list of new addresses
2454  *  @addr_count: number of addresses
2455  *  @next: iterator function to walk the address list
2456  *
2457  *  The given list replaces any existing list.  Clears the secondary addrs from
2458  *  receive address registers.  Uses unused receive address registers for the
2459  *  first secondary addresses, and falls back to promiscuous mode as needed.
2460  *
2461  *  Drivers using secondary unicast addresses must set user_set_promisc when
2462  *  manually putting the device into promiscuous mode.
2463  **/
2464 s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
2465 				      u32 addr_count, ixgbe_mc_addr_itr next)
2466 {
2467 	u8 *addr;
2468 	u32 i;
2469 	u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
2470 	u32 uc_addr_in_use;
2471 	u32 fctrl;
2472 	u32 vmdq;
2473 
2474 	DEBUGFUNC("ixgbe_update_uc_addr_list_generic");
2475 
2476 	/*
2477 	 * Clear accounting of old secondary address list,
2478 	 * don't count RAR[0]
2479 	 */
2480 	uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
2481 	hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
2482 	hw->addr_ctrl.overflow_promisc = 0;
2483 
2484 	/* Zero out the other receive addresses */
2485 	DEBUGOUT1("Clearing RAR[1-%d]\n", uc_addr_in_use+1);
2486 	for (i = 0; i < uc_addr_in_use; i++) {
2487 		IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0);
2488 		IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0);
2489 	}
2490 
2491 	/* Add the new addresses */
2492 	for (i = 0; i < addr_count; i++) {
2493 		DEBUGOUT(" Adding the secondary addresses:\n");
2494 		addr = next(hw, &addr_list, &vmdq);
2495 		ixgbe_add_uc_addr(hw, addr, vmdq);
2496 	}
2497 
2498 	if (hw->addr_ctrl.overflow_promisc) {
2499 		/* enable promisc if not already in overflow or set by user */
2500 		if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
2501 			DEBUGOUT(" Entering address overflow promisc mode\n");
2502 			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2503 			fctrl |= IXGBE_FCTRL_UPE;
2504 			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2505 		}
2506 	} else {
2507 		/* only disable if set by overflow, not by user */
2508 		if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
2509 			DEBUGOUT(" Leaving address overflow promisc mode\n");
2510 			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2511 			fctrl &= ~IXGBE_FCTRL_UPE;
2512 			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2513 		}
2514 	}
2515 
2516 	DEBUGOUT("ixgbe_update_uc_addr_list_generic Complete\n");
2517 	return IXGBE_SUCCESS;
2518 }
2519 
2520 /**
2521  *  ixgbe_mta_vector - Determines bit-vector in multicast table to set
2522  *  @hw: pointer to hardware structure
2523  *  @mc_addr: the multicast address
2524  *
2525  *  Extracts the 12 bits from a multicast address that determine which
2526  *  bit-vector to set in the multicast table. The hardware uses 12 bits of
2527  *  incoming rx multicast addresses to determine the bit-vector to check in
2528  *  the MTA. Which of the 4 combinations of 12 bits the hardware uses is set
2529  *  by the MO field of MCSTCTRL. The MO field is set during initialization
2530  *  to mc_filter_type.
2531  **/
2532 static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
2533 {
2534 	u32 vector = 0;
2535 
2536 	DEBUGFUNC("ixgbe_mta_vector");
2537 
2538 	switch (hw->mac.mc_filter_type) {
2539 	case 0:   /* use bits [47:36] of the address */
2540 		vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
2541 		break;
2542 	case 1:   /* use bits [46:35] of the address */
2543 		vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
2544 		break;
2545 	case 2:   /* use bits [45:34] of the address */
2546 		vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
2547 		break;
2548 	case 3:   /* use bits [43:32] of the address */
2549 		vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
2550 		break;
2551 	default:  /* Invalid mc_filter_type */
2552 		DEBUGOUT("MC filter type param set incorrectly\n");
2553 		ASSERT(0);
2554 		break;
2555 	}
2556 
2557 	/* vector can only be 12-bits or boundary will be exceeded */
2558 	vector &= 0xFFF;
2559 	return vector;
2560 }
2561 
2562 /**
2563  *  ixgbe_set_mta - Set bit-vector in multicast table
2564  *  @hw: pointer to hardware structure
2565  *  @mc_addr: multicast address to set in the table
2566  *
2567  *  Sets the bit-vector in the multicast table.
2568  **/
2569 void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
2570 {
2571 	u32 vector;
2572 	u32 vector_bit;
2573 	u32 vector_reg;
2574 
2575 	DEBUGFUNC("ixgbe_set_mta");
2576 
2577 	hw->addr_ctrl.mta_in_use++;
2578 
2579 	vector = ixgbe_mta_vector(hw, mc_addr);
2580 	DEBUGOUT1(" bit-vector = 0x%03X\n", vector);
2581 
2582 	/*
2583 	 * The MTA is a register array of 128 32-bit registers. It is treated
2584 	 * like an array of 4096 bits.  We want to set bit
2585 	 * BitArray[vector_value]. So we figure out what register the bit is
2586 	 * in, read it, OR in the new bit, then write back the new value.  The
2587 	 * register is determined by the upper 7 bits of the vector value and
2588 	 * the bit within that register are determined by the lower 5 bits of
2589 	 * the bit within that register is determined by the lower 5 bits of
2590 	 */
2591 	vector_reg = (vector >> 5) & 0x7F;
2592 	vector_bit = vector & 0x1F;
2593 	hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
2594 }
2595 
2596 /**
2597  *  ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
2598  *  @hw: pointer to hardware structure
2599  *  @mc_addr_list: the list of new multicast addresses
2600  *  @mc_addr_count: number of addresses
2601  *  @next: iterator function to walk the multicast address list
2602  *  @clear: flag, when set clears the table beforehand
2603  *
2604  *  When the clear flag is set, the given list replaces any existing list.
2605  *  Hashes the given addresses into the multicast table.
2606  **/
2607 s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
2608 				      u32 mc_addr_count, ixgbe_mc_addr_itr next,
2609 				      bool clear)
2610 {
2611 	u32 i;
2612 	u32 vmdq;
2613 
2614 	DEBUGFUNC("ixgbe_update_mc_addr_list_generic");
2615 
2616 	/*
2617 	 * Set the new number of MC addresses that we are being requested to
2618 	 * use.
2619 	 */
2620 	hw->addr_ctrl.num_mc_addrs = mc_addr_count;
2621 	hw->addr_ctrl.mta_in_use = 0;
2622 
2623 	/* Clear mta_shadow */
2624 	if (clear) {
2625 		DEBUGOUT(" Clearing MTA\n");
2626 		memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
2627 	}
2628 
2629 	/* Update mta_shadow */
2630 	for (i = 0; i < mc_addr_count; i++) {
2631 		DEBUGOUT(" Adding the multicast addresses:\n");
2632 		ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
2633 	}
2634 
2635 	/* Enable mta */
2636 	for (i = 0; i < hw->mac.mcft_size; i++)
2637 		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
2638 				      hw->mac.mta_shadow[i]);
2639 
2640 	if (hw->addr_ctrl.mta_in_use > 0)
2641 		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
2642 				IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
2643 
2644 	DEBUGOUT("ixgbe_update_mc_addr_list_generic Complete\n");
2645 	return IXGBE_SUCCESS;
2646 }
2647 
2648 /**
2649  *  ixgbe_enable_mc_generic - Enable multicast address in RAR
2650  *  @hw: pointer to hardware structure
2651  *
2652  *  Enables multicast address in RAR and the use of the multicast hash table.
2653  **/
2654 s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
2655 {
2656 	struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2657 
2658 	DEBUGFUNC("ixgbe_enable_mc_generic");
2659 
2660 	if (a->mta_in_use > 0)
2661 		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
2662 				hw->mac.mc_filter_type);
2663 
2664 	return IXGBE_SUCCESS;
2665 }
2666 
2667 /**
2668  *  ixgbe_disable_mc_generic - Disable multicast address in RAR
2669  *  @hw: pointer to hardware structure
2670  *
2671  *  Disables multicast address in RAR and the use of the multicast hash table.
2672  **/
2673 s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
2674 {
2675 	struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2676 
2677 	DEBUGFUNC("ixgbe_disable_mc_generic");
2678 
2679 	if (a->mta_in_use > 0)
2680 		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
2681 
2682 	return IXGBE_SUCCESS;
2683 }
2684 
2685 /**
2686  *  ixgbe_fc_enable_generic - Enable flow control
2687  *  @hw: pointer to hardware structure
2688  *
2689  *  Enable flow control according to the current settings.
2690  **/
2691 s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
2692 {
2693 	s32 ret_val = IXGBE_SUCCESS;
2694 	u32 mflcn_reg, fccfg_reg;
2695 	u32 reg;
2696 	u32 fcrtl, fcrth;
2697 	int i;
2698 
2699 	DEBUGFUNC("ixgbe_fc_enable_generic");
2700 
2701 	/* Validate the water mark configuration */
2702 	if (!hw->fc.pause_time) {
2703 		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2704 		goto out;
2705 	}
2706 
2707 	/* Low water mark of zero causes XOFF floods */
2708 	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
2709 		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
2710 		    hw->fc.high_water[i]) {
2711 			if (!hw->fc.low_water[i] ||
2712 			    hw->fc.low_water[i] >= hw->fc.high_water[i]) {
2713 				DEBUGOUT("Invalid water mark configuration\n");
2714 				ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2715 				goto out;
2716 			}
2717 		}
2718 	}
2719 
2720 	/* Negotiate the fc mode to use */
2721 	ixgbe_fc_autoneg(hw);
2722 
2723 	/* Disable any previous flow control settings */
2724 	mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
2725 	mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);
2726 
2727 	fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
2728 	fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
2729 
2730 	/*
2731 	 * The possible values of fc.current_mode are:
2732 	 * 0: Flow control is completely disabled
2733 	 * 1: Rx flow control is enabled (we can receive pause frames,
2734 	 *    but not send pause frames).
2735 	 * 2: Tx flow control is enabled (we can send pause frames but
2736 	 *    we do not support receiving pause frames).
2737 	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
2738 	 * other: Invalid.
2739 	 */
2740 	switch (hw->fc.current_mode) {
2741 	case ixgbe_fc_none:
2742 		/*
2743 		 * Flow control is disabled by software override or autoneg.
2744 		 * The code below will actually disable it in the HW.
2745 		 */
2746 		break;
2747 	case ixgbe_fc_rx_pause:
2748 		/*
2749 		 * Rx Flow control is enabled and Tx Flow control is
2750 		 * disabled by software override. Since there really
2751 		 * isn't a way to advertise that we are capable of RX
2752 		 * Pause ONLY, we will advertise that we support both
2753 		 * symmetric and asymmetric Rx PAUSE.  Later, we will
2754 		 * disable the adapter's ability to send PAUSE frames.
2755 		 */
2756 		mflcn_reg |= IXGBE_MFLCN_RFCE;
2757 		break;
2758 	case ixgbe_fc_tx_pause:
2759 		/*
2760 		 * Tx Flow control is enabled, and Rx Flow control is
2761 		 * disabled by software override.
2762 		 */
2763 		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
2764 		break;
2765 	case ixgbe_fc_full:
2766 		/* Flow control (both Rx and Tx) is enabled by SW override. */
2767 		mflcn_reg |= IXGBE_MFLCN_RFCE;
2768 		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
2769 		break;
2770 	default:
2771 		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
2772 			     "Flow control param set incorrectly\n");
2773 		ret_val = IXGBE_ERR_CONFIG;
2774 		goto out;
2775 		break;
2776 	}
2777 
2778 	/* Set 802.3x based flow control settings. */
2779 	mflcn_reg |= IXGBE_MFLCN_DPF;
2780 	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
2781 	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
2782 
2783 
2784 	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
2785 	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
2786 		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
2787 		    hw->fc.high_water[i]) {
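			/*
			 * The water marks appear to be kept in KB units, so
			 * the shift by 10 converts them to the byte values
			 * written to FCRTL/FCRTH.
			 */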
2788 			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
2789 			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
2790 			fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
2791 		} else {
2792 			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
2793 			/*
2794 			 * In order to prevent Tx hangs when the internal Tx
2795 			 * switch is enabled we must set the high water mark
2796 			 * to the maximum FCRTH value.  This allows the Tx
2797 			 * switch to function even under heavy Rx workloads.
2798 			 */
2799 			fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
2800 		}
2801 
2802 		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
2803 	}
2804 
2805 	/* Configure pause time (2 TCs per register) */
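	/*
	 * Multiplying by 0x00010001 replicates the 16-bit pause time into
	 * both halves of each 32-bit FCTTV register, one per traffic class.
	 */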
2806 	reg = hw->fc.pause_time * 0x00010001;
2807 	for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
2808 		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
2809 
2810 	/* Configure flow control refresh threshold value */
2811 	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
2812 
2813 out:
2814 	return ret_val;
2815 }
2816 
2817 /**
2818  *  ixgbe_negotiate_fc - Negotiate flow control
2819  *  @hw: pointer to hardware structure
2820  *  @adv_reg: flow control advertised settings
2821  *  @lp_reg: link partner's flow control settings
2822  *  @adv_sym: symmetric pause bit in advertisement
2823  *  @adv_asm: asymmetric pause bit in advertisement
2824  *  @lp_sym: symmetric pause bit in link partner advertisement
2825  *  @lp_asm: asymmetric pause bit in link partner advertisement
2826  *
2827  *  Find the intersection between advertised settings and link partner's
2828  *  advertised settings
2829  **/
2830 static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
2831 			      u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
2832 {
2833 	if ((!(adv_reg)) ||  (!(lp_reg))) {
2834 		ERROR_REPORT3(IXGBE_ERROR_UNSUPPORTED,
2835 			     "Local or link partner's advertised flow control "
2836 			     "settings are NULL. Local: %x, link partner: %x\n",
2837 			     adv_reg, lp_reg);
2838 		return IXGBE_ERR_FC_NOT_NEGOTIATED;
2839 	}
2840 
2841 	if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
2842 		/*
2843 		 * Now we need to check if the user selected Rx ONLY
2844 		 * pause frames.  In this case, we had to advertise
2845 		 * FULL flow control because we could not advertise RX
2846 		 * ONLY. Hence, we must now check to see if we need to
2847 		 * turn OFF the TRANSMISSION of PAUSE frames.
2848 		 */
2849 		if (hw->fc.requested_mode == ixgbe_fc_full) {
2850 			hw->fc.current_mode = ixgbe_fc_full;
2851 			DEBUGOUT("Flow Control = FULL.\n");
2852 		} else {
2853 			hw->fc.current_mode = ixgbe_fc_rx_pause;
2854 			DEBUGOUT("Flow Control=RX PAUSE frames only\n");
2855 		}
2856 	} else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2857 		   (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
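		/*
		 * We advertise asymmetric PAUSE only while the link partner
		 * advertises both symmetric and asymmetric PAUSE, so the
		 * partner will honor the PAUSE frames we send: Tx-only
		 * flow control.
		 */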
2858 		hw->fc.current_mode = ixgbe_fc_tx_pause;
2859 		DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
2860 	} else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2861 		   !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2862 		hw->fc.current_mode = ixgbe_fc_rx_pause;
2863 		DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
2864 	} else {
2865 		hw->fc.current_mode = ixgbe_fc_none;
2866 		DEBUGOUT("Flow Control = NONE.\n");
2867 	}
2868 	return IXGBE_SUCCESS;
2869 }
2870 
2871 /**
2872  *  ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
2873  *  @hw: pointer to hardware structure
2874  *
2875  *  Enable flow control according to the negotiated settings on 1 gig fiber.
2876  **/
2877 static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
2878 {
2879 	u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
2880 	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2881 
2882 	/*
2883 	 * On multispeed fiber at 1g, bail out if
2884 	 * - link is up but AN did not complete, or if
2885 	 * - link is up and AN completed but timed out
2886 	 */
2887 
2888 	linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
2889 	if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
2890 	    (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
2891 		ERROR_REPORT1(IXGBE_ERROR_POLLING,
2892 			     "Auto-Negotiation did not complete or timed out");
2893 		goto out;
2894 	}
2895 
2896 	pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
2897 	pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
2898 
2899 	ret_val =  ixgbe_negotiate_fc(hw, pcs_anadv_reg,
2900 				      pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
2901 				      IXGBE_PCS1GANA_ASM_PAUSE,
2902 				      IXGBE_PCS1GANA_SYM_PAUSE,
2903 				      IXGBE_PCS1GANA_ASM_PAUSE);
2904 
2905 out:
2906 	return ret_val;
2907 }
2908 
2909 /**
2910  *  ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
2911  *  @hw: pointer to hardware structure
2912  *
2913  *  Enable flow control according to IEEE clause 37.
2914  **/
2915 static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
2916 {
2917 	u32 links2, anlp1_reg, autoc_reg, links;
2918 	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2919 
2920 	/*
2921 	 * On backplane, bail out if
2922 	 * - backplane autoneg was not completed, or if
2923 	 * - we are 82599 and link partner is not AN enabled
2924 	 */
2925 	links = IXGBE_READ_REG(hw, IXGBE_LINKS);
2926 	if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
2927 		ERROR_REPORT1(IXGBE_ERROR_POLLING,
2928 			     "Auto-Negotiation did not complete");
2929 		goto out;
2930 	}
2931 
2932 	if (hw->mac.type == ixgbe_mac_82599EB) {
2933 		links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
2934 		if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
2935 			ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
2936 				     "Link partner is not AN enabled");
2937 			goto out;
2938 		}
2939 	}
2940 	/*
2941 	 * Read the 10g AN autoc and LP ability registers and resolve
2942 	 * local flow control settings accordingly
2943 	 */
2944 	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2945 	anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
2946 
2947 	ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
2948 		anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
2949 		IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);
2950 
2951 out:
2952 	return ret_val;
2953 }
2954 
2955 /**
2956  *  ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
2957  *  @hw: pointer to hardware structure
2958  *
2959  *  Enable flow control according to IEEE clause 37.
2960  **/
2961 static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
2962 {
2963 	u16 technology_ability_reg = 0;
2964 	u16 lp_technology_ability_reg = 0;
2965 
2966 	hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
2967 			     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
2968 			     &technology_ability_reg);
2969 	hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP,
2970 			     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
2971 			     &lp_technology_ability_reg);
2972 
2973 	return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
2974 				  (u32)lp_technology_ability_reg,
2975 				  IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
2976 				  IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
2977 }
2978 
2979 /**
2980  *  ixgbe_fc_autoneg - Configure flow control
2981  *  @hw: pointer to hardware structure
2982  *
2983  *  Compares our advertised flow control capabilities to those advertised by
2984  *  our link partner, and determines the proper flow control mode to use.
2985  **/
2986 void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
2987 {
2988 	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2989 	ixgbe_link_speed speed;
2990 	bool link_up;
2991 
2992 	DEBUGFUNC("ixgbe_fc_autoneg");
2993 
2994 	/*
2995 	 * AN should have completed when the cable was plugged in.
2996 	 * Look for reasons to bail out.  Bail out if:
2997 	 * - FC autoneg is disabled, or if
2998 	 * - link is not up.
2999 	 */
3000 	if (hw->fc.disable_fc_autoneg) {
3001 		ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
3002 			     "Flow control autoneg is disabled");
3003 		goto out;
3004 	}
3005 
3006 	hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
3007 	if (!link_up) {
3008 		ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
3009 		goto out;
3010 	}
3011 
3012 	switch (hw->phy.media_type) {
3013 	/* Autoneg flow control on fiber adapters */
3014 	case ixgbe_media_type_fiber_fixed:
3015 	case ixgbe_media_type_fiber:
3016 		if (speed == IXGBE_LINK_SPEED_1GB_FULL)
3017 			ret_val = ixgbe_fc_autoneg_fiber(hw);
3018 		break;
3019 
3020 	/* Autoneg flow control on backplane adapters */
3021 	case ixgbe_media_type_backplane:
3022 		ret_val = ixgbe_fc_autoneg_backplane(hw);
3023 		break;
3024 
3025 	/* Autoneg flow control on copper adapters */
3026 	case ixgbe_media_type_copper:
3027 		if (ixgbe_device_supports_autoneg_fc(hw))
3028 			ret_val = ixgbe_fc_autoneg_copper(hw);
3029 		break;
3030 
3031 	default:
3032 		break;
3033 	}
3034 
3035 out:
3036 	if (ret_val == IXGBE_SUCCESS) {
3037 		hw->fc.fc_was_autonegged = TRUE;
3038 	} else {
3039 		hw->fc.fc_was_autonegged = FALSE;
3040 		hw->fc.current_mode = hw->fc.requested_mode;
3041 	}
3042 }
3043 
3044 /**
3045  * ixgbe_pcie_timeout_poll - Return number of times to poll for completion
3046  * @hw: pointer to hardware structure
3047  *
3048  * System-wide timeout range is encoded in PCIe Device Control2 register.
3049  *
3050  * Add 10% to specified maximum and return the number of times to poll for
3051  * completion timeout, in units of 100 microsec.  Never return less than
3052  * 800 = 80 millisec.
3053  **/
3054 static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw)
3055 {
3056 	s16 devctl2;
3057 	u32 pollcnt;
3058 
3059 	devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
3060 	devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK;
3061 
3062 	switch (devctl2) {
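	/*
	 * Map the encoded completion-timeout range to a poll count; the
	 * caller delays 100 usec per poll, so pollcnt expresses the upper
	 * bound of the range in 100 usec units.
	 */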
3063 	case IXGBE_PCIDEVCTRL2_65_130ms:
3064 		pollcnt = 1300;		/* 130 millisec */
3065 		break;
3066 	case IXGBE_PCIDEVCTRL2_260_520ms:
3067 		pollcnt = 5200;		/* 520 millisec */
3068 		break;
3069 	case IXGBE_PCIDEVCTRL2_1_2s:
3070 		pollcnt = 20000;	/* 2 sec */
3071 		break;
3072 	case IXGBE_PCIDEVCTRL2_4_8s:
3073 		pollcnt = 80000;	/* 8 sec */
3074 		break;
3075 	case IXGBE_PCIDEVCTRL2_17_34s:
3076 		pollcnt = 34000;	/* 34 sec */
3077 		break;
3078 	case IXGBE_PCIDEVCTRL2_50_100us:	/* 100 microsecs */
3079 	case IXGBE_PCIDEVCTRL2_1_2ms:		/* 2 millisecs */
3080 	case IXGBE_PCIDEVCTRL2_16_32ms:		/* 32 millisec */
3081 	case IXGBE_PCIDEVCTRL2_16_32ms_def:	/* 32 millisec default */
3082 	default:
3083 		pollcnt = 800;		/* 80 millisec minimum */
3084 		break;
3085 	}
3086 
3087 	/* add 10% to spec maximum */
3088 	return (pollcnt * 11) / 10;
3089 }
3090 
3091 /**
3092  *  ixgbe_disable_pcie_master - Disable PCI-express master access
3093  *  @hw: pointer to hardware structure
3094  *
3095  *  Disables PCI-Express master access and verifies there are no pending
3096  *  requests.  Returns IXGBE_ERR_MASTER_REQUESTS_PENDING if the master disable
3097  *  bit did not cause the master requests to be disabled, otherwise
3098  *  IXGBE_SUCCESS, signifying that master requests are disabled.
3099  **/
3100 s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
3101 {
3102 	s32 status = IXGBE_SUCCESS;
3103 	u32 i, poll;
3104 
3105 	DEBUGFUNC("ixgbe_disable_pcie_master");
3106 
3107 	/* Always set this bit to ensure any future transactions are blocked */
3108 	IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);
3109 
3110 	/* Exit if master requests are blocked */
3111 	if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
3112 		goto out;
3113 
3114 	/* Poll for master request bit to clear */
3115 	for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
3116 		usec_delay(100);
3117 		if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
3118 			goto out;
3119 	}
3120 
3121 	/*
3122 	 * Two consecutive resets are required via CTRL.RST per datasheet
3123 	 * 5.2.5.3.2 Master Disable.  We set a flag to inform the reset routine
3124 	 * of this need.  The first reset prevents new master requests from
3125 	 * being issued by our device.  We then must wait 1usec or more for any
3126 	 * remaining completions from the PCIe bus to trickle in, and then reset
3127 	 * again to clear out any effects they may have had on our device.
3128 	 */
3129 	DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n");
3130 	hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
3131 
3132 	/*
3133 	 * Before proceeding, make sure that the PCIe block does not have
3134 	 * transactions pending.
3135 	 */
3136 	poll = ixgbe_pcie_timeout_poll(hw);
3137 	for (i = 0; i < poll; i++) {
3138 		usec_delay(100);
3139 		if (!(IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS) &
3140 		    IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
3141 			goto out;
3142 	}
3143 
3144 	ERROR_REPORT1(IXGBE_ERROR_POLLING,
3145 		     "PCIe transaction pending bit also did not clear.\n");
3146 	status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
3147 
3148 out:
3149 	return status;
3150 }
3151 
3152 /**
3153  *  ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
3154  *  @hw: pointer to hardware structure
3155  *  @mask: Mask to specify which semaphore to acquire
3156  *
3157  *  Acquires the SWFW semaphore through the GSSR register for the specified
3158  *  function (CSR, PHY0, PHY1, EEPROM, Flash)
3159  **/
3160 s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
3161 {
3162 	u32 gssr = 0;
3163 	u32 swmask = mask;
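	/*
	 * The firmware's semaphore bits sit 5 bits above the corresponding
	 * software bits in GSSR, hence the shifted fwmask below.
	 */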
3164 	u32 fwmask = mask << 5;
3165 	u32 timeout = 200;
3166 	u32 i;
3167 
3168 	DEBUGFUNC("ixgbe_acquire_swfw_sync");
3169 
3170 	for (i = 0; i < timeout; i++) {
3171 		/*
3172 		 * SW NVM semaphore bit is used for access to all
3173 		 * SW_FW_SYNC bits (not just NVM)
3174 		 */
3175 		if (ixgbe_get_eeprom_semaphore(hw))
3176 			return IXGBE_ERR_SWFW_SYNC;
3177 
3178 		gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
3179 		if (!(gssr & (fwmask | swmask))) {
3180 			gssr |= swmask;
3181 			IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
3182 			ixgbe_release_eeprom_semaphore(hw);
3183 			return IXGBE_SUCCESS;
3184 		} else {
3185 			/* Resource is currently in use by FW or SW */
3186 			ixgbe_release_eeprom_semaphore(hw);
3187 			msec_delay(5);
3188 		}
3189 	}
3190 
3191 	/* If time expired clear the bits holding the lock and retry */
3192 	if (gssr & (fwmask | swmask))
3193 		ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask));
3194 
3195 	msec_delay(5);
3196 	return IXGBE_ERR_SWFW_SYNC;
3197 }
3198 
3199 /**
3200  *  ixgbe_release_swfw_sync - Release SWFW semaphore
3201  *  @hw: pointer to hardware structure
3202  *  @mask: Mask to specify which semaphore to release
3203  *
3204  *  Releases the SWFW semaphore through the GSSR register for the specified
3205  *  function (CSR, PHY0, PHY1, EEPROM, Flash)
3206  **/
3207 void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
3208 {
3209 	u32 gssr;
3210 	u32 swmask = mask;
3211 
3212 	DEBUGFUNC("ixgbe_release_swfw_sync");
3213 
3214 	ixgbe_get_eeprom_semaphore(hw);
3215 
3216 	gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
3217 	gssr &= ~swmask;
3218 	IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
3219 
3220 	ixgbe_release_eeprom_semaphore(hw);
3221 }
3222 
3223 /**
3224  *  ixgbe_disable_sec_rx_path_generic - Stops the receive data path
3225  *  @hw: pointer to hardware structure
3226  *
3227  *  Stops the receive data path and waits for the HW to internally empty
3228  *  the Rx security block
3229  **/
3230 s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw)
3231 {
3232 #define IXGBE_MAX_SECRX_POLL 40
3233 
3234 	int i;
3235 	int secrxreg;
3236 
3237 	DEBUGFUNC("ixgbe_disable_sec_rx_path_generic");
3238 
3239 
3240 	secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
3241 	secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
3242 	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
3243 	for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
3244 		secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
3245 		if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
3246 			break;
3247 		else
3248 			/* Use interrupt-safe sleep just in case */
3249 			usec_delay(1000);
3250 	}
3251 
3252 	/* For informational purposes only */
3253 	if (i >= IXGBE_MAX_SECRX_POLL)
3254 		DEBUGOUT("Rx unit being enabled before security "
3255 			 "path fully disabled.  Continuing with init.\n");
3256 
3257 	return IXGBE_SUCCESS;
3258 }
3259 
3260 /**
3261  *  ixgbe_enable_sec_rx_path_generic - Enables the receive data path
3262  *  @hw: pointer to hardware structure
3263  *
3264  *  Enables the receive data path.
3265  **/
3266 s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw)
3267 {
3268 	int secrxreg;
3269 
3270 	DEBUGFUNC("ixgbe_enable_sec_rx_path_generic");
3271 
3272 	secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
3273 	secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
3274 	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
3275 	IXGBE_WRITE_FLUSH(hw);
3276 
3277 	return IXGBE_SUCCESS;
3278 }
3279 
3280 /**
3281  *  ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit
3282  *  @hw: pointer to hardware structure
3283  *  @regval: register value to write to RXCTRL
3284  *
3285  *  Enables the Rx DMA unit
3286  **/
3287 s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
3288 {
3289 	DEBUGFUNC("ixgbe_enable_rx_dma_generic");
3290 
3291 	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
3292 
3293 	return IXGBE_SUCCESS;
3294 }
3295 
3296 /**
3297  *  ixgbe_blink_led_start_generic - Blink LED based on index.
3298  *  @hw: pointer to hardware structure
3299  *  @index: led number to blink
3300  **/
3301 s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
3302 {
3303 	ixgbe_link_speed speed = 0;
3304 	bool link_up = 0;
3305 	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
3306 	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
3307 	s32 ret_val = IXGBE_SUCCESS;
3308 
3309 	DEBUGFUNC("ixgbe_blink_led_start_generic");
3310 
3311 	/*
3312 	 * Link must be up to auto-blink the LEDs;
3313 	 * Force it if link is down.
3314 	 */
3315 	hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
3316 
3317 	if (!link_up) {
3318 		/* Need the SW/FW semaphore around AUTOC writes if 82599 and
3319 		 * LESM is on.
3320 		 */
3321 		bool got_lock = FALSE;
3322 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
3323 		    ixgbe_verify_lesm_fw_enabled_82599(hw)) {
3324 			ret_val = hw->mac.ops.acquire_swfw_sync(hw,
3325 							IXGBE_GSSR_MAC_CSR_SM);
3326 			if (ret_val != IXGBE_SUCCESS) {
3327 				ret_val = IXGBE_ERR_SWFW_SYNC;
3328 				goto out;
3329 			}
3330 			got_lock = TRUE;
3331 		}
3332 
3333 		autoc_reg |= IXGBE_AUTOC_AN_RESTART;
3334 		autoc_reg |= IXGBE_AUTOC_FLU;
3335 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
3336 		IXGBE_WRITE_FLUSH(hw);
3337 
3338 		if (got_lock)
3339 			hw->mac.ops.release_swfw_sync(hw,
3340 						      IXGBE_GSSR_MAC_CSR_SM);
3341 		msec_delay(10);
3342 	}
3343 
3344 	led_reg &= ~IXGBE_LED_MODE_MASK(index);
3345 	led_reg |= IXGBE_LED_BLINK(index);
3346 	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
3347 	IXGBE_WRITE_FLUSH(hw);
3348 
3349 out:
3350 	return ret_val;
3351 }
3352 
3353 /**
3354  *  ixgbe_blink_led_stop_generic - Stop blinking LED based on index.
3355  *  @hw: pointer to hardware structure
3356  *  @index: led number to stop blinking
3357  **/
3358 s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
3359 {
3360 	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
3361 	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
3362 	s32 ret_val = IXGBE_SUCCESS;
3363 	bool got_lock = FALSE;
3364 
3365 	DEBUGFUNC("ixgbe_blink_led_stop_generic");
3366 	/* Need the SW/FW semaphore around AUTOC writes if 82599 and
3367 	 * LESM is on.
3368 	 */
3369 	if ((hw->mac.type == ixgbe_mac_82599EB) &&
3370 	    ixgbe_verify_lesm_fw_enabled_82599(hw)) {
3371 		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
3372 						IXGBE_GSSR_MAC_CSR_SM);
3373 		if (ret_val != IXGBE_SUCCESS) {
3374 			ret_val = IXGBE_ERR_SWFW_SYNC;
3375 			goto out;
3376 		}
3377 		got_lock = TRUE;
3378 	}
3379 
3380 
3381 	autoc_reg &= ~IXGBE_AUTOC_FLU;
3382 	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
3383 	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
3384 
3385 	if (hw->mac.type == ixgbe_mac_82599EB)
3386 		ixgbe_reset_pipeline_82599(hw);
3387 
3388 	if (got_lock)
3389 		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
3390 
3391 	led_reg &= ~IXGBE_LED_MODE_MASK(index);
3392 	led_reg &= ~IXGBE_LED_BLINK(index);
3393 	led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
3394 	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
3395 	IXGBE_WRITE_FLUSH(hw);
3396 
3397 out:
3398 	return ret_val;
3399 }
3400 
3401 /**
3402  *  ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM
3403  *  @hw: pointer to hardware structure
3404  *  @san_mac_offset: SAN MAC address offset
3405  *
3406  *  This function will read the EEPROM location for the SAN MAC address
3407  *  pointer, and returns the value at that location.  This is used in both
3408  *  get and set mac_addr routines.
3409  **/
3410 static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
3411 					 u16 *san_mac_offset)
3412 {
3413 	s32 ret_val;
3414 
3415 	DEBUGFUNC("ixgbe_get_san_mac_addr_offset");
3416 
3417 	/*
3418 	 * First read the EEPROM pointer to see if the MAC addresses are
3419 	 * available.
3420 	 */
3421 	ret_val = hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR,
3422 				      san_mac_offset);
3423 	if (ret_val) {
3424 		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
3425 			      "eeprom at offset %d failed",
3426 			      IXGBE_SAN_MAC_ADDR_PTR);
3427 	}
3428 
3429 	return ret_val;
3430 }
3431 
3432 /**
3433  *  ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM
3434  *  @hw: pointer to hardware structure
3435  *  @san_mac_addr: SAN MAC address
3436  *
3437  *  Reads the SAN MAC address from the EEPROM, if it's available.  This is
3438  *  per-port, so set_lan_id() must be called before reading the addresses.
3439  *  set_lan_id() is called by identify_sfp(), but this cannot be relied
3440  *  upon for non-SFP connections, so we must call it here.
3441  **/
3442 s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
3443 {
3444 	u16 san_mac_data, san_mac_offset;
3445 	u8 i;
3446 	s32 ret_val;
3447 
3448 	DEBUGFUNC("ixgbe_get_san_mac_addr_generic");
3449 
3450 	/*
3451 	 * First read the EEPROM pointer to see if the MAC addresses are
3452 	 * available.  If they're not, no point in calling set_lan_id() here.
3453 	 */
3454 	ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
3455 	if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
3456 		goto san_mac_addr_out;
3457 
3458 	/* make sure we know which port we need to program */
3459 	hw->mac.ops.set_lan_id(hw);
3460 	/* apply the port offset to the address offset */
3461 	(hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
3462 			 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
3463 	for (i = 0; i < 3; i++) {
3464 		ret_val = hw->eeprom.ops.read(hw, san_mac_offset,
3465 					      &san_mac_data);
3466 		if (ret_val) {
3467 			ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
3468 				      "eeprom read at offset %d failed",
3469 				      san_mac_offset);
3470 			goto san_mac_addr_out;
3471 		}
3472 		san_mac_addr[i * 2] = (u8)(san_mac_data);
3473 		san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
3474 		san_mac_offset++;
3475 	}
3476 	return IXGBE_SUCCESS;
3477 
3478 san_mac_addr_out:
3479 	/*
3480 	 * No addresses available in this EEPROM.  It's not an
3481 	 * error though, so just wipe the local address and return.
3482 	 */
3483 	for (i = 0; i < 6; i++)
3484 		san_mac_addr[i] = 0xFF;
3485 	return IXGBE_SUCCESS;
3486 }
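
/*
 * Illustrative caller sketch (not part of the original source; local names
 * are hypothetical).  Because the routine above returns IXGBE_SUCCESS even
 * when no SAN address is present, a caller checks for the wiped 0xFF
 * pattern rather than relying on the return code alone:
 *
 *	u8 san_addr[6];
 *
 *	if (ixgbe_get_san_mac_addr_generic(hw, san_addr) == IXGBE_SUCCESS &&
 *	    san_addr[0] != 0xFF)
 *		... program san_addr into a receive address register ...
 */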
3487 
3488 /**
3489  *  ixgbe_set_san_mac_addr_generic - Write the SAN MAC address to the EEPROM
3490  *  @hw: pointer to hardware structure
3491  *  @san_mac_addr: SAN MAC address
3492  *
3493  *  Write a SAN MAC address to the EEPROM.
3494  **/
3495 s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
3496 {
3497 	s32 ret_val;
3498 	u16 san_mac_data, san_mac_offset;
3499 	u8 i;
3500 
3501 	DEBUGFUNC("ixgbe_set_san_mac_addr_generic");
3502 
3503 	/* Look for SAN mac address pointer.  If not defined, return */
3504 	ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
3505 	if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
3506 		return IXGBE_ERR_NO_SAN_ADDR_PTR;
3507 
3508 	/* Make sure we know which port we need to write */
3509 	hw->mac.ops.set_lan_id(hw);
3510 	/* Apply the port offset to the address offset */
3511 	san_mac_offset += hw->bus.func ? IXGBE_SAN_MAC_ADDR_PORT1_OFFSET :
3512 					 IXGBE_SAN_MAC_ADDR_PORT0_OFFSET;
3513 
3514 	for (i = 0; i < 3; i++) {
3515 		san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8);
3516 		san_mac_data |= (u16)(san_mac_addr[i * 2]);
3517 		hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data);
3518 		san_mac_offset++;
3519 	}
3520 
3521 	return IXGBE_SUCCESS;
3522 }
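
/*
 * Worked example (illustrative, not from the original source): for an
 * address beginning 00:1B:21:..., the first EEPROM word written above is
 * (0x1B << 8) | 0x00 = 0x1B00, i.e. the words are stored in little-endian
 * byte order, matching the unpacking in ixgbe_get_san_mac_addr_generic().
 */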
3523 
3524 /**
3525  *  ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count
3526  *  @hw: pointer to hardware structure
3527  *
3528  *  Read PCIe configuration space, and get the MSI-X vector count from
3529  *  the capabilities table.
3530  **/
3531 u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
3532 {
3533 	u16 msix_count = 1;
3534 	u16 max_msix_count;
3535 	u16 pcie_offset;
3536 
3537 	switch (hw->mac.type) {
3538 	case ixgbe_mac_82598EB:
3539 		pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS;
3540 		max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598;
3541 		break;
3542 	case ixgbe_mac_82599EB:
3543 	case ixgbe_mac_X540:
3544 		pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
3545 		max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
3546 		break;
3547 	default:
3548 		return msix_count;
3549 	}
3550 
3551 	DEBUGFUNC("ixgbe_get_pcie_msix_count_generic");
3552 	msix_count = IXGBE_READ_PCIE_WORD(hw, pcie_offset);
3553 	msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
3554 
3555 	/* MSI-X count is zero-based in HW */
3556 	msix_count++;
3557 
3558 	if (msix_count > max_msix_count)
3559 		msix_count = max_msix_count;
3560 
3561 	return msix_count;
3562 }
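
/*
 * Worked example (illustrative): if the MSI-X capability word reads 0x003F,
 * masking with IXGBE_PCIE_MSIX_TBL_SZ_MASK leaves a zero-based table size of
 * 63, so msix_count becomes 64 and is then clamped to max_msix_count for the
 * MAC in use (e.g. IXGBE_MAX_MSIX_VECTORS_82599 on 82599/X540).
 */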
3563 
3564 /**
3565  *  ixgbe_insert_mac_addr_generic - Find a RAR for this mac address
3566  *  @hw: pointer to hardware structure
3567  *  @addr: Address to put into receive address register
3568  *  @vmdq: VMDq pool to assign
3569  *
3570  *  Puts an Ethernet address into a receive address register, or
3571  *  finds the RAR that it is already in; adds to the pool list.
3572  **/
3573 s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
3574 {
3575 	static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF;
3576 	u32 first_empty_rar = NO_EMPTY_RAR_FOUND;
3577 	u32 rar;
3578 	u32 rar_low, rar_high;
3579 	u32 addr_low, addr_high;
3580 
3581 	DEBUGFUNC("ixgbe_insert_mac_addr_generic");
3582 
3583 	/* swap bytes for HW little endian */
3584 	addr_low  = addr[0] | (addr[1] << 8)
3585 			    | (addr[2] << 16)
3586 			    | (addr[3] << 24);
3587 	addr_high = addr[4] | (addr[5] << 8);
3588 
3589 	/*
3590 	 * Either find the mac_id in rar or find the first empty space.
3591 	 * rar_highwater points to just after the highest currently used
3592 	 * rar in order to shorten the search.  It grows when we add a new
3593 	 * rar to the top.
3594 	 */
3595 	for (rar = 0; rar < hw->mac.rar_highwater; rar++) {
3596 		rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
3597 
3598 		if (((IXGBE_RAH_AV & rar_high) == 0)
3599 		    && first_empty_rar == NO_EMPTY_RAR_FOUND) {
3600 			first_empty_rar = rar;
3601 		} else if ((rar_high & 0xFFFF) == addr_high) {
3602 			rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar));
3603 			if (rar_low == addr_low)
3604 				break;    /* found it already in the rars */
3605 		}
3606 	}
3607 
3608 	if (rar < hw->mac.rar_highwater) {
3609 		/* already there so just add to the pool bits */
3610 		ixgbe_set_vmdq(hw, rar, vmdq);
3611 	} else if (first_empty_rar != NO_EMPTY_RAR_FOUND) {
3612 		/* stick it into first empty RAR slot we found */
3613 		rar = first_empty_rar;
3614 		ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
3615 	} else if (rar == hw->mac.rar_highwater) {
3616 		/* add it to the top of the list and inc the highwater mark */
3617 		ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
3618 		hw->mac.rar_highwater++;
3619 	} else if (rar >= hw->mac.num_rar_entries) {
3620 		return IXGBE_ERR_INVALID_MAC_ADDR;
3621 	}
3622 
3623 	/*
3624 	 * If we found rar[0], make sure the default pool bit (we use pool 0)
3625 	 * remains cleared to be sure default pool packets will get delivered
3626 	 */
3627 	if (rar == 0)
3628 		ixgbe_clear_vmdq(hw, rar, 0);
3629 
3630 	return rar;
3631 }
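
/*
 * Illustrative caller sketch (hypothetical locals, not part of the original
 * source).  The return value is either the RAR index now holding the address
 * or a negative ixgbe error code:
 *
 *	s32 rar = ixgbe_insert_mac_addr_generic(hw, mac_addr, pool);
 *
 *	if (rar < 0)
 *		... no free RAR was available for this address ...
 */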
3632 
3633 /**
3634  *  ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
3635  *  @hw: pointer to hardware struct
3636  *  @rar: receive address register index to disassociate
3637  *  @vmdq: VMDq pool index to remove from the rar
3638  **/
3639 s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
3640 {
3641 	u32 mpsar_lo, mpsar_hi;
3642 	u32 rar_entries = hw->mac.num_rar_entries;
3643 
3644 	DEBUGFUNC("ixgbe_clear_vmdq_generic");
3645 
3646 	/* Make sure we are using a valid rar index range */
3647 	if (rar >= rar_entries) {
3648 		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
3649 			     "RAR index %d is out of range.\n", rar);
3650 		return IXGBE_ERR_INVALID_ARGUMENT;
3651 	}
3652 
3653 	mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3654 	mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3655 
3656 	if (!mpsar_lo && !mpsar_hi)
3657 		goto done;
3658 
3659 	if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
3660 		if (mpsar_lo) {
3661 			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
3662 			mpsar_lo = 0;
3663 		}
3664 		if (mpsar_hi) {
3665 			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
3666 			mpsar_hi = 0;
3667 		}
3668 	} else if (vmdq < 32) {
3669 		mpsar_lo &= ~(1 << vmdq);
3670 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
3671 	} else {
3672 		mpsar_hi &= ~(1 << (vmdq - 32));
3673 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
3674 	}
3675 
3676 	/* was that the last pool using this rar? */
3677 	if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
3678 		hw->mac.ops.clear_rar(hw, rar);
3679 done:
3680 	return IXGBE_SUCCESS;
3681 }
3682 
3683 /**
3684  *  ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address
3685  *  @hw: pointer to hardware struct
3686  *  @rar: receive address register index to associate with a VMDq index
3687  *  @vmdq: VMDq pool index
3688  **/
3689 s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
3690 {
3691 	u32 mpsar;
3692 	u32 rar_entries = hw->mac.num_rar_entries;
3693 
3694 	DEBUGFUNC("ixgbe_set_vmdq_generic");
3695 
3696 	/* Make sure we are using a valid rar index range */
3697 	if (rar >= rar_entries) {
3698 		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
3699 			     "RAR index %d is out of range.\n", rar);
3700 		return IXGBE_ERR_INVALID_ARGUMENT;
3701 	}
3702 
3703 	if (vmdq < 32) {
3704 		mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3705 		mpsar |= 1 << vmdq;
3706 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
3707 	} else {
3708 		mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3709 		mpsar |= 1 << (vmdq - 32);
3710 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
3711 	}
3712 	return IXGBE_SUCCESS;
3713 }
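
/*
 * Worked example (illustrative): pool indices 0-31 map to bits of MPSAR_LO
 * and indices 32-63 to MPSAR_HI, so ixgbe_set_vmdq_generic(hw, rar, 40)
 * sets bit (40 - 32) = 8 of IXGBE_MPSAR_HI(rar).
 */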
3714 
3715 /**
3716  *  ixgbe_set_vmdq_san_mac_generic - Associate default VMDq pool index with a rx address
3717  *  @hw: pointer to hardware struct
3718  *  @vmdq: VMDq pool index
3719  *
3720  *  This function should only be used in IOV mode.  In IOV mode, the default
3721  *  pool is the next pool after the number of VFs advertised, not pool 0.
3722  *  The MPSAR table needs to be updated for the SAN_MAC RAR
3723  *  [hw->mac.san_mac_rar_index].
3724  **/
3725 s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
3726 {
3727 	u32 rar = hw->mac.san_mac_rar_index;
3728 
3729 	DEBUGFUNC("ixgbe_set_vmdq_san_mac");
3730 
3731 	if (vmdq < 32) {
3732 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq);
3733 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
3734 	} else {
3735 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
3736 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32));
3737 	}
3738 
3739 	return IXGBE_SUCCESS;
3740 }
3741 
3742 /**
3743  *  ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
3744  *  @hw: pointer to hardware structure
3745  **/
3746 s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
3747 {
3748 	int i;
3749 
3750 	DEBUGFUNC("ixgbe_init_uta_tables_generic");
3751 	DEBUGOUT(" Clearing UTA\n");
3752 
3753 	for (i = 0; i < 128; i++)
3754 		IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
3755 
3756 	return IXGBE_SUCCESS;
3757 }
3758 
3759 /**
3760  *  ixgbe_find_vlvf_slot - find the VLAN id or the first empty slot
3761  *  @hw: pointer to hardware structure
3762  *  @vlan: VLAN id to write to VLAN filter
3763  *
3764  *  Returns the VLVF index where this VLAN id should be placed.
3765  *
3766  **/
3767 s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan)
3768 {
3769 	u32 bits = 0;
3770 	u32 first_empty_slot = 0;
3771 	s32 regindex;
3772 
3773 	/* short cut the special case */
3774 	if (vlan == 0)
3775 		return 0;
3776 
3777 	/*
3778 	 * Search for the vlan id in the VLVF entries.  Save off the first
3779 	 * empty slot found along the way.
3780 	 */
3781 	for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
3782 		bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
3783 		if (!bits && !(first_empty_slot))
3784 			first_empty_slot = regindex;
3785 		else if ((bits & 0x0FFF) == vlan)
3786 			break;
3787 	}
3788 
3789 	/*
3790 	 * If regindex is less than IXGBE_VLVF_ENTRIES, then we found the vlan
3791 	 * id in the VLVF.  Otherwise use the first empty VLVF register for
3792 	 * this vlan id.
3793 	 */
3794 	if (regindex >= IXGBE_VLVF_ENTRIES) {
3795 		if (first_empty_slot)
3796 			regindex = first_empty_slot;
3797 		else {
3798 			ERROR_REPORT1(IXGBE_ERROR_SOFTWARE,
3799 				     "No space in VLVF.\n");
3800 			regindex = IXGBE_ERR_NO_SPACE;
3801 		}
3802 	}
3803 
3804 	return regindex;
3805 }
3806 
3807 /**
3808  *  ixgbe_set_vfta_generic - Set VLAN filter table
3809  *  @hw: pointer to hardware structure
3810  *  @vlan: VLAN id to write to VLAN filter
3811  *  @vind: VMDq output index that maps queue to VLAN id in VLVFB
3812  *  @vlan_on: boolean flag to turn on/off VLAN in VLVF
3813  *
3814  *  Turn on/off specified VLAN in the VLAN filter table.
3815  **/
3816 s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
3817 			   bool vlan_on)
3818 {
3819 	s32 regindex;
3820 	u32 bitindex;
3821 	u32 vfta;
3822 	u32 targetbit;
3823 	s32 ret_val = IXGBE_SUCCESS;
3824 	bool vfta_changed = FALSE;
3825 
3826 	DEBUGFUNC("ixgbe_set_vfta_generic");
3827 
3828 	if (vlan > 4095)
3829 		return IXGBE_ERR_PARAM;
3830 
3831 	/*
3832 	 * This is a two-part operation - first the VFTA, then the
3833 	 * VLVF and VLVFB if VT Mode is set.
3834 	 * We don't write the VFTA until we know the VLVF part succeeded.
3835 	 */
3836 
3837 	/* Part 1
3838 	 * The VFTA is a bitstring made up of 128 32-bit registers
3839 	 * that enable the particular VLAN id, much like the MTA:
3840 	 *    bits[11-5]: which register
3841 	 *    bits[4-0]:  which bit in the register
3842 	 */
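	/*
	 * Worked example (illustrative): for vlan = 291 (0x123),
	 * regindex = (291 >> 5) & 0x7F = 9 and bitindex = 291 & 0x1F = 3,
	 * so bit 3 of VFTA[9] controls this VLAN id.
	 */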
3843 	regindex = (vlan >> 5) & 0x7F;
3844 	bitindex = vlan & 0x1F;
3845 	targetbit = (1 << bitindex);
3846 	vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
3847 
3848 	if (vlan_on) {
3849 		if (!(vfta & targetbit)) {
3850 			vfta |= targetbit;
3851 			vfta_changed = TRUE;
3852 		}
3853 	} else {
3854 		if ((vfta & targetbit)) {
3855 			vfta &= ~targetbit;
3856 			vfta_changed = TRUE;
3857 		}
3858 	}
3859 
3860 	/* Part 2
3861 	 * Call ixgbe_set_vlvf_generic to set VLVFB and VLVF
3862 	 */
3863 	ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on,
3864 					 &vfta_changed);
3865 	if (ret_val != IXGBE_SUCCESS)
3866 		return ret_val;
3867 
3868 	if (vfta_changed)
3869 		IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta);
3870 
3871 	return IXGBE_SUCCESS;
3872 }
3873 
3874 /**
3875  *  ixgbe_set_vlvf_generic - Set VLAN Pool Filter
3876  *  @hw: pointer to hardware structure
3877  *  @vlan: VLAN id to write to VLAN filter
3878  *  @vind: VMDq output index that maps queue to VLAN id in VLVFB
3879  *  @vlan_on: boolean flag to turn on/off VLAN in VLVF
3880  *  @vfta_changed: pointer to boolean flag which indicates whether VFTA
3881  *                 should be changed
3882  *
3883  *  Turn on/off specified bit in VLVF table.
3884  **/
3885 s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
3886 			    bool vlan_on, bool *vfta_changed)
3887 {
3888 	u32 vt;
3889 
3890 	DEBUGFUNC("ixgbe_set_vlvf_generic");
3891 
3892 	if (vlan > 4095)
3893 		return IXGBE_ERR_PARAM;
3894 
3895 	/* If VT Mode is set
3896 	 *   Either vlan_on
3897 	 *     make sure the vlan is in VLVF
3898 	 *     set the vind bit in the matching VLVFB
3899 	 *   Or !vlan_on
3900 	 *     clear the pool bit and possibly the vind
3901 	 */
3902 	vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
3903 	if (vt & IXGBE_VT_CTL_VT_ENABLE) {
3904 		s32 vlvf_index;
3905 		u32 bits;
3906 
3907 		vlvf_index = ixgbe_find_vlvf_slot(hw, vlan);
3908 		if (vlvf_index < 0)
3909 			return vlvf_index;
3910 
3911 		if (vlan_on) {
3912 			/* set the pool bit */
3913 			if (vind < 32) {
3914 				bits = IXGBE_READ_REG(hw,
3915 						IXGBE_VLVFB(vlvf_index * 2));
3916 				bits |= (1 << vind);
3917 				IXGBE_WRITE_REG(hw,
3918 						IXGBE_VLVFB(vlvf_index * 2),
3919 						bits);
3920 			} else {
3921 				bits = IXGBE_READ_REG(hw,
3922 					IXGBE_VLVFB((vlvf_index * 2) + 1));
3923 				bits |= (1 << (vind - 32));
3924 				IXGBE_WRITE_REG(hw,
3925 					IXGBE_VLVFB((vlvf_index * 2) + 1),
3926 					bits);
3927 			}
3928 		} else {
3929 			/* clear the pool bit */
3930 			if (vind < 32) {
3931 				bits = IXGBE_READ_REG(hw,
3932 						IXGBE_VLVFB(vlvf_index * 2));
3933 				bits &= ~(1 << vind);
3934 				IXGBE_WRITE_REG(hw,
3935 						IXGBE_VLVFB(vlvf_index * 2),
3936 						bits);
3937 				bits |= IXGBE_READ_REG(hw,
3938 					IXGBE_VLVFB((vlvf_index * 2) + 1));
3939 			} else {
3940 				bits = IXGBE_READ_REG(hw,
3941 					IXGBE_VLVFB((vlvf_index * 2) + 1));
3942 				bits &= ~(1 << (vind - 32));
3943 				IXGBE_WRITE_REG(hw,
3944 					IXGBE_VLVFB((vlvf_index * 2) + 1),
3945 					bits);
3946 				bits |= IXGBE_READ_REG(hw,
3947 						IXGBE_VLVFB(vlvf_index * 2));
3948 			}
3949 		}
3950 
3951 		/*
3952 		 * If there are still bits set in the VLVFB registers
3953 		 * for the VLAN ID indicated we need to see if the
3954 		 * caller is requesting that we clear the VFTA entry bit.
3955 		 * If the caller has requested that we clear the VFTA
3956 		 * entry bit but there are still pools/VFs using this VLAN
3957 		 * ID entry then ignore the request.  We're not worried
3958 		 * about the case where we're turning the VFTA VLAN ID
3959 		 * entry bit on, only when requested to turn it off as
3960 		 * there may be multiple pools and/or VFs using the
3961 		 * VLAN ID entry.  In that case we cannot clear the
3962 		 * VFTA bit until all pools/VFs using that VLAN ID have also
3963 		 * been cleared.  This will be indicated by "bits" being
3964 		 * zero.
3965 		 */
3966 		if (bits) {
3967 			IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
3968 					(IXGBE_VLVF_VIEN | vlan));
3969 			if ((!vlan_on) && (vfta_changed != NULL)) {
3970 				/* someone wants to clear the vfta entry
3971 				 * but some pools/VFs are still using it.
3972 				 * Ignore it. */
3973 				*vfta_changed = FALSE;
3974 			}
3975 		} else
3976 			IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
3977 	}
3978 
3979 	return IXGBE_SUCCESS;
3980 }
3981 
3982 /**
3983  *  ixgbe_clear_vfta_generic - Clear VLAN filter table
3984  *  @hw: pointer to hardware structure
3985  *
3986  *  Clears the VLAN filter table, and the VMDq index associated with the filter
3987  **/
3988 s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
3989 {
3990 	u32 offset;
3991 
3992 	DEBUGFUNC("ixgbe_clear_vfta_generic");
3993 
3994 	for (offset = 0; offset < hw->mac.vft_size; offset++)
3995 		IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
3996 
3997 	for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
3998 		IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
3999 		IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0);
4000 		IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset * 2) + 1), 0);
4001 	}
4002 
4003 	return IXGBE_SUCCESS;
4004 }
4005 
4006 /**
4007  *  ixgbe_check_mac_link_generic - Determine link and speed status
4008  *  @hw: pointer to hardware structure
4009  *  @speed: pointer to link speed
4010  *  @link_up: TRUE when link is up
4011  *  @link_up_wait_to_complete: bool used to wait for link up or not
4012  *
4013  *  Reads the links register to determine if link is up and the current speed
4014  **/
4015 s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
4016 				 bool *link_up, bool link_up_wait_to_complete)
4017 {
4018 	u32 links_reg, links_orig;
4019 	u32 i;
4020 
4021 	DEBUGFUNC("ixgbe_check_mac_link_generic");
4022 
4023 	/* clear the old state */
4024 	links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);
4025 
4026 	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
4027 
4028 	if (links_orig != links_reg) {
4029 		DEBUGOUT2("LINKS changed from %08X to %08X\n",
4030 			  links_orig, links_reg);
4031 	}
4032 
4033 	if (link_up_wait_to_complete) {
4034 		for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
4035 			if (links_reg & IXGBE_LINKS_UP) {
4036 				*link_up = TRUE;
4037 				break;
4038 			} else {
4039 				*link_up = FALSE;
4040 			}
4041 			msec_delay(100);
4042 			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
4043 		}
4044 	} else {
4045 		if (links_reg & IXGBE_LINKS_UP)
4046 			*link_up = TRUE;
4047 		else
4048 			*link_up = FALSE;
4049 	}
4050 
4051 	if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
4052 	    IXGBE_LINKS_SPEED_10G_82599)
4053 		*speed = IXGBE_LINK_SPEED_10GB_FULL;
4054 	else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
4055 		 IXGBE_LINKS_SPEED_1G_82599)
4056 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
4057 	else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
4058 		 IXGBE_LINKS_SPEED_100_82599)
4059 		*speed = IXGBE_LINK_SPEED_100_FULL;
4060 	else
4061 		*speed = IXGBE_LINK_SPEED_UNKNOWN;
4062 
4063 	return IXGBE_SUCCESS;
4064 }
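
/*
 * Illustrative caller sketch (hypothetical locals, not part of the original
 * source), typically invoked through the mac ops table:
 *
 *	ixgbe_link_speed speed;
 *	bool link_up;
 *
 *	hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
 *	if (link_up && speed == IXGBE_LINK_SPEED_10GB_FULL)
 *		... link is up at 10 Gb/s ...
 */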
4065 
4066 /**
4067  *  ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
4068  *  the EEPROM
4069  *  @hw: pointer to hardware structure
4070  *  @wwnn_prefix: the alternative WWNN prefix
4071  *  @wwpn_prefix: the alternative WWPN prefix
4072  *
4073  *  This function reads the alternative SAN MAC address block in the EEPROM
4074  *  to check for support of the alternative WWNN/WWPN prefixes.
4075  **/
4076 s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
4077 				 u16 *wwpn_prefix)
4078 {
4079 	u16 offset, caps;
4080 	u16 alt_san_mac_blk_offset;
4081 
4082 	DEBUGFUNC("ixgbe_get_wwn_prefix_generic");
4083 
4084 	/* clear output first */
4085 	*wwnn_prefix = 0xFFFF;
4086 	*wwpn_prefix = 0xFFFF;
4087 
4088 	/* check if alternative SAN MAC is supported */
4089 	offset = IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR;
4090 	if (hw->eeprom.ops.read(hw, offset, &alt_san_mac_blk_offset))
4091 		goto wwn_prefix_err;
4092 
4093 	if ((alt_san_mac_blk_offset == 0) ||
4094 	    (alt_san_mac_blk_offset == 0xFFFF))
4095 		goto wwn_prefix_out;
4096 
4097 	/* check capability in alternative san mac address block */
4098 	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
4099 	if (hw->eeprom.ops.read(hw, offset, &caps))
4100 		goto wwn_prefix_err;
4101 	if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
4102 		goto wwn_prefix_out;
4103 
4104 	/* get the corresponding prefix for WWNN/WWPN */
4105 	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
4106 	if (hw->eeprom.ops.read(hw, offset, wwnn_prefix)) {
4107 		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
4108 			      "eeprom read at offset %d failed", offset);
4109 	}
4110 
4111 	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
4112 	if (hw->eeprom.ops.read(hw, offset, wwpn_prefix))
4113 		goto wwn_prefix_err;
4114 
4115 wwn_prefix_out:
4116 	return IXGBE_SUCCESS;
4117 
4118 wwn_prefix_err:
4119 	ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
4120 		      "eeprom read at offset %d failed", offset);
4121 	return IXGBE_SUCCESS;
4122 }
4123 
4124 /**
4125  *  ixgbe_get_fcoe_boot_status_generic - Get FCOE boot status from EEPROM
4126  *  @hw: pointer to hardware structure
4127  *  @bs: the fcoe boot status
4128  *
4129  *  This function will read the FCOE boot status from the iSCSI FCOE block
4130  **/
4131 s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs)
4132 {
4133 	u16 offset, caps, flags;
4134 	s32 status;
4135 
4136 	DEBUGFUNC("ixgbe_get_fcoe_boot_status_generic");
4137 
4138 	/* clear output first */
4139 	*bs = ixgbe_fcoe_bootstatus_unavailable;
4140 
4141 	/* check if FCOE IBA block is present */
4142 	offset = IXGBE_FCOE_IBA_CAPS_BLK_PTR;
4143 	status = hw->eeprom.ops.read(hw, offset, &caps);
4144 	if (status != IXGBE_SUCCESS)
4145 		goto out;
4146 
4147 	if (!(caps & IXGBE_FCOE_IBA_CAPS_FCOE))
4148 		goto out;
4149 
4150 	/* check if iSCSI FCOE block is populated */
4151 	status = hw->eeprom.ops.read(hw, IXGBE_ISCSI_FCOE_BLK_PTR, &offset);
4152 	if (status != IXGBE_SUCCESS)
4153 		goto out;
4154 
4155 	if ((offset == 0) || (offset == 0xFFFF))
4156 		goto out;
4157 
4158 	/* read fcoe flags in iSCSI FCOE block */
4159 	offset = offset + IXGBE_ISCSI_FCOE_FLAGS_OFFSET;
4160 	status = hw->eeprom.ops.read(hw, offset, &flags);
4161 	if (status != IXGBE_SUCCESS)
4162 		goto out;
4163 
4164 	if (flags & IXGBE_ISCSI_FCOE_FLAGS_ENABLE)
4165 		*bs = ixgbe_fcoe_bootstatus_enabled;
4166 	else
4167 		*bs = ixgbe_fcoe_bootstatus_disabled;
4168 
4169 out:
4170 	return status;
4171 }
4172 
4173 /**
4174  *  ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
4175  *  @hw: pointer to hardware structure
4176  *  @enable: enable or disable switch for anti-spoofing
4177  *  @pf: Physical Function pool - do not enable anti-spoofing for the PF
4178  *
4179  **/
4180 void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf)
4181 {
4182 	int j;
4183 	int pf_target_reg = pf >> 3;
4184 	int pf_target_shift = pf % 8;
4185 	u32 pfvfspoof = 0;
4186 
4187 	if (hw->mac.type == ixgbe_mac_82598EB)
4188 		return;
4189 
4190 	if (enable)
4191 		pfvfspoof = IXGBE_SPOOF_MACAS_MASK;
4192 
4193 	/*
4194 	 * PFVFSPOOF register array is size 8 with 8 bits assigned to
4195 	 * MAC anti-spoof enables in each register array element.
4196 	 */
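	/*
	 * Worked example (illustrative): with enable set and pf = 12,
	 * pf_target_reg = 12 >> 3 = 1 and pf_target_shift = 12 % 8 = 4, so
	 * PFVFSPOOF(0) is written with the full mask, PFVFSPOOF(1) gets only
	 * bits 0-3 set, and the remaining registers are cleared below.
	 */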
4197 	for (j = 0; j < pf_target_reg; j++)
4198 		IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
4199 
4200 	/*
4201 	 * The PF should be allowed to spoof so that it can support
4202 	 * emulation mode NICs.  Do not set the bits assigned to the PF
4203 	 */
4204 	pfvfspoof &= (1 << pf_target_shift) - 1;
4205 	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
4206 
4207 	/*
4208 	 * Remaining pools belong to the PF so they do not need to have
4209 	 * anti-spoofing enabled.
4210 	 */
4211 	for (j++; j < IXGBE_PFVFSPOOF_REG_COUNT; j++)
4212 		IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), 0);
4213 }
4214 
4215 /**
4216  *  ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
4217  *  @hw: pointer to hardware structure
4218  *  @enable: enable or disable switch for VLAN anti-spoofing
4219  *  @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
4220  *
4221  **/
4222 void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
4223 {
4224 	int vf_target_reg = vf >> 3;
4225 	int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT;
4226 	u32 pfvfspoof;
4227 
4228 	if (hw->mac.type == ixgbe_mac_82598EB)
4229 		return;
4230 
4231 	pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
4232 	if (enable)
4233 		pfvfspoof |= (1 << vf_target_shift);
4234 	else
4235 		pfvfspoof &= ~(1 << vf_target_shift);
4236 	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
4237 }
4238 
4239 /**
4240  *  ixgbe_get_device_caps_generic - Get additional device capabilities
4241  *  @hw: pointer to hardware structure
4242  *  @device_caps: the EEPROM word with the extra device capabilities
4243  *
4244  *  This function will read the EEPROM location for the device capabilities,
4245  *  and return the word through device_caps.
4246  **/
4247 s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps)
4248 {
4249 	DEBUGFUNC("ixgbe_get_device_caps_generic");
4250 
4251 	hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
4252 
4253 	return IXGBE_SUCCESS;
4254 }
4255 
4256 /**
4257  *  ixgbe_enable_relaxed_ordering_gen2 - Enable relaxed ordering
4258  *  @hw: pointer to hardware structure
4259  *
4260  **/
4261 void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw)
4262 {
4263 	u32 regval;
4264 	u32 i;
4265 
4266 	DEBUGFUNC("ixgbe_enable_relaxed_ordering_gen2");
4267 
4268 	/* Enable relaxed ordering */
4269 	for (i = 0; i < hw->mac.max_tx_queues; i++) {
4270 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
4271 		regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
4272 		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
4273 	}
4274 
4275 	for (i = 0; i < hw->mac.max_rx_queues; i++) {
4276 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
4277 		regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
4278 			  IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
4279 		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
4280 	}
4281 
4282 }
4283 
4284 /**
4285  *  ixgbe_calculate_checksum - Calculate checksum for buffer
4286  *  @buffer: pointer to the buffer to checksum
4287  *  @length: number of bytes to include in the checksum
4288  *
4289  *  Calculates the checksum over the given buffer and length, and returns it.
4290  **/
4291 u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
4292 {
4293 	u32 i;
4294 	u8 sum = 0;
4295 
4296 	DEBUGFUNC("ixgbe_calculate_checksum");
4297 
4298 	if (!buffer)
4299 		return 0;
4300 
4301 	for (i = 0; i < length; i++)
4302 		sum += buffer[i];
4303 
4304 	return (u8) (0 - sum);
4305 }
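
/*
 * Worked example (illustrative): for a buffer whose bytes sum to 0x1A7, the
 * low byte of the sum is 0xA7 and the value returned is (u8)(0 - 0xA7) =
 * 0x59, so summing the buffer together with its checksum byte yields zero
 * modulo 256.
 */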
4306 
4307 /**
4308  *  ixgbe_host_interface_command - Issue command to manageability block
4309  *  @hw: pointer to the HW structure
4310  *  @buffer: contains the command to write and where the return status will
4311  *   be placed
4312  *  @length: length of buffer, must be multiple of 4 bytes
4313  *
4314  *  Communicates with the manageability block.  On success returns
4315  *  IXGBE_SUCCESS, otherwise returns IXGBE_ERR_HOST_INTERFACE_COMMAND.
4316  **/
4317 s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
4318 				 u32 length)
4319 {
4320 	u32 hicr, i, bi;
4321 	u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
4322 	u8 buf_len, dword_len;
4323 
4324 	s32 ret_val = IXGBE_SUCCESS;
4325 
4326 	DEBUGFUNC("ixgbe_host_interface_command");
4327 
4328 	if (length == 0 || length & 0x3 ||
4329 	    length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
4330 		DEBUGOUT("Buffer length failure.\n");
4331 		ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4332 		goto out;
4333 	}
4334 
4335 	/* Check that the host interface is enabled. */
4336 	hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
4337 	if ((hicr & IXGBE_HICR_EN) == 0) {
4338 		DEBUGOUT("IXGBE_HOST_EN bit disabled.\n");
4339 		ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4340 		goto out;
4341 	}
4342 
4343 	/* Calculate length in DWORDs */
4344 	dword_len = length >> 2;
4345 
4346 	/*
4347 	 * The device driver writes the relevant command block
4348 	 * into the ram area.
4349 	 */
4350 	for (i = 0; i < dword_len; i++)
4351 		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
4352 				      i, IXGBE_CPU_TO_LE32(buffer[i]));
4353 
4354 	/* Setting this bit tells the ARC that a new command is pending. */
4355 	IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);
4356 
4357 	for (i = 0; i < IXGBE_HI_COMMAND_TIMEOUT; i++) {
4358 		hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
4359 		if (!(hicr & IXGBE_HICR_C))
4360 			break;
4361 		msec_delay(1);
4362 	}
4363 
4364 	/* Check command successful completion. */
4365 	if (i == IXGBE_HI_COMMAND_TIMEOUT ||
4366 	    (!(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV))) {
4367 		DEBUGOUT("Command has failed with no status valid.\n");
4368 		ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4369 		goto out;
4370 	}
4371 
4372 	/* Calculate length in DWORDs */
4373 	dword_len = hdr_size >> 2;
4374 
4375 	/* first pull in the header so we know the buffer length */
4376 	for (bi = 0; bi < dword_len; bi++) {
4377 		buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
4378 		IXGBE_LE32_TO_CPUS(&buffer[bi]);
4379 	}
4380 
4381 	/* If there is anything in the data position, pull it in */
4382 	buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len;
4383 	if (buf_len == 0)
4384 		goto out;
4385 
4386 	if (length < (buf_len + hdr_size)) {
4387 		DEBUGOUT("Buffer not large enough for reply message.\n");
4388 		ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4389 		goto out;
4390 	}
4391 
4392 	/* Calculate length in DWORDs, add 3 for odd lengths */
4393 	dword_len = (buf_len + 3) >> 2;
4394 
4395 	/* Pull in the rest of the buffer (bi is where we left off) */
4396 	for (; bi <= dword_len; bi++) {
4397 		buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
4398 		IXGBE_LE32_TO_CPUS(&buffer[bi]);
4399 	}
4400 
4401 out:
4402 	return ret_val;
4403 }
4404 
4405 /**
4406  *  ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware
4407  *  @hw: pointer to the HW structure
4408  *  @maj: driver version major number
4409  *  @min: driver version minor number
4410  *  @build: driver version build number
4411  *  @sub: driver version sub build number
4412  *
4413  *  Sends the driver version number to firmware through the manageability
4414  *  block.  On success returns IXGBE_SUCCESS, otherwise returns
4415  *  IXGBE_ERR_SWFW_SYNC when encountering an error acquiring the
4416  *  semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when the command fails.
4417  **/
4418 s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
4419 				 u8 build, u8 sub)
4420 {
4421 	struct ixgbe_hic_drv_info fw_cmd;
4422 	int i;
4423 	s32 ret_val = IXGBE_SUCCESS;
4424 
4425 	DEBUGFUNC("ixgbe_set_fw_drv_ver_generic");
4426 
4427 	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM)
4428 	    != IXGBE_SUCCESS) {
4429 		ret_val = IXGBE_ERR_SWFW_SYNC;
4430 		goto out;
4431 	}
4432 
4433 	fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
4434 	fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
4435 	fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
4436 	fw_cmd.port_num = (u8)hw->bus.func;
4437 	fw_cmd.ver_maj = maj;
4438 	fw_cmd.ver_min = min;
4439 	fw_cmd.ver_build = build;
4440 	fw_cmd.ver_sub = sub;
4441 	fw_cmd.hdr.checksum = 0;
4442 	fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
4443 				(FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
4444 	fw_cmd.pad = 0;
4445 	fw_cmd.pad2 = 0;
4446 
4447 	for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
4448 		ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
4449 						       sizeof(fw_cmd));
4450 		if (ret_val != IXGBE_SUCCESS)
4451 			continue;
4452 
4453 		if (fw_cmd.hdr.cmd_or_resp.ret_status ==
4454 		    FW_CEM_RESP_STATUS_SUCCESS)
4455 			ret_val = IXGBE_SUCCESS;
4456 		else
4457 			ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4458 
4459 		break;
4460 	}
4461 
4462 	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
4463 out:
4464 	return ret_val;
4465 }
4466 
4467 /**
4468  * ixgbe_set_rxpba_generic - Initialize Rx packet buffer
4469  * @hw: pointer to hardware structure
4470  * @num_pb: number of packet buffers to allocate
4471  * @headroom: reserve n KB of headroom
4472  * @strategy: packet buffer allocation strategy
4473  **/
4474 void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
4475 			     int strategy)
4476 {
4477 	u32 pbsize = hw->mac.rx_pb_size;
4478 	int i = 0;
4479 	u32 rxpktsize, txpktsize, txpbthresh;
4480 
4481 	/* Reserve headroom */
4482 	pbsize -= headroom;
4483 
4484 	if (!num_pb)
4485 		num_pb = 1;
4486 
4487 	/* Divide remaining packet buffer space amongst the number of packet
4488 	 * buffers requested using supplied strategy.
4489 	 */
4490 	switch (strategy) {
4491 	case PBA_STRATEGY_WEIGHTED:
4492 		/* The ixgbe_dcb_pba_80_48 strategy weights the first half of
4493 		 * the packet buffers with 5/8 of the packet buffer space.
4494 		 */
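		/*
		 * Worked example (illustrative): with num_pb = 8 and 512 KB
		 * of remaining packet buffer, rxpktsize = (512 * 5) / (8 * 4)
		 * = 80 KB for each of the first four TCs (5/8 of the space),
		 * leaving 192 KB to be split equally below as 48 KB for each
		 * of the last four TCs - hence the 80_48 name.
		 */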
4495 		rxpktsize = (pbsize * 5) / (num_pb * 4);
4496 		pbsize -= rxpktsize * (num_pb / 2);
4497 		rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
4498 		for (; i < (num_pb / 2); i++)
4499 			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
4500 		/* Fall through to configure remaining packet buffers */
4501 	case PBA_STRATEGY_EQUAL:
4502 		rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
4503 		for (; i < num_pb; i++)
4504 			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
4505 		break;
4506 	default:
4507 		break;
4508 	}
4509 
4510 	/* Only support an equally distributed Tx packet buffer strategy. */
4511 	txpktsize = IXGBE_TXPBSIZE_MAX / num_pb;
4512 	txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
4513 	for (i = 0; i < num_pb; i++) {
4514 		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
4515 		IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
4516 	}
4517 
4518 	/* Clear unused TCs, if any, to zero buffer size */
4519 	for (; i < IXGBE_MAX_PB; i++) {
4520 		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
4521 		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
4522 		IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
4523 	}
4524 }
4525 
4526 /**
4527  * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo
4528  * @hw: pointer to the hardware structure
4529  *
4530  * The 82599 and x540 MACs can experience issues if TX work is still pending
4531  * when a reset occurs.  This function prevents this by flushing the PCIe
4532  * buffers on the system.
4533  **/
4534 void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
4535 {
4536 	u32 gcr_ext, hlreg0;
4537 
4538 	/*
4539 	 * If double reset is not requested then all transactions should
4540 	 * already be clear and as such there is no work to do
4541 	 */
4542 	if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED))
4543 		return;
4544 
4545 	/*
4546 	 * Set loopback enable to prevent any transmits from being sent
4547 	 * should the link come up.  This assumes that the RXCTRL.RXEN bit
4548 	 * has already been cleared.
4549 	 */
4550 	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4551 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);
4552 
4553 	/* initiate cleaning flow for buffers in the PCIe transaction layer */
4554 	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
4555 	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
4556 			gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR);
4557 
4558 	/* Flush all writes and allow 20usec for all transactions to clear */
4559 	IXGBE_WRITE_FLUSH(hw);
4560 	usec_delay(20);
4561 
4562 	/* restore previous register values */
4563 	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
4564 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
4565 }
4566 
4567 
4568 /**
4569  * ixgbe_dcb_get_rtrup2tc_generic - read rtrup2tc reg
4570  * @hw: pointer to hardware structure
4571  * @map: pointer to u8 array for returning the map
4572  *
4573  * Read the rtrup2tc HW register and resolve its content into map
4574  **/
4575 void ixgbe_dcb_get_rtrup2tc_generic(struct ixgbe_hw *hw, u8 *map)
4576 {
4577 	u32 reg, i;
4578 
4579 	reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
4580 	for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
4581 		map[i] = IXGBE_RTRUP2TC_UP_MASK &
4582 			(reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT));
4583 	return;
4584 }
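
/*
 * Worked example (illustrative; assumes the usual 3-bit-per-priority layout,
 * i.e. IXGBE_RTRUP2TC_UP_SHIFT == 3 and IXGBE_RTRUP2TC_UP_MASK == 0x7): a
 * RTRUP2TC value of 0x00000688 yields map[0] = 0, map[1] = 1, map[2] = 2,
 * map[3] = 3 and map[4..7] = 0.
 */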
4585