1 /******************************************************************************
2 SPDX-License-Identifier: BSD-3-Clause
3
4 Copyright (c) 2001-2020, Intel Corporation
5 All rights reserved.
6
7 Redistribution and use in source and binary forms, with or without
8 modification, are permitted provided that the following conditions are met:
9
10 1. Redistributions of source code must retain the above copyright notice,
11 this list of conditions and the following disclaimer.
12
13 2. Redistributions in binary form must reproduce the above copyright
14 notice, this list of conditions and the following disclaimer in the
15 documentation and/or other materials provided with the distribution.
16
17 3. Neither the name of the Intel Corporation nor the names of its
18 contributors may be used to endorse or promote products derived from
19 this software without specific prior written permission.
20
21 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 POSSIBILITY OF SUCH DAMAGE.
32
33 ******************************************************************************/
34
35 #include "ixgbe_type.h"
36 #include "ixgbe_82599.h"
37 #include "ixgbe_api.h"
38 #include "ixgbe_common.h"
39 #include "ixgbe_phy.h"
40
41 #define IXGBE_82599_MAX_TX_QUEUES 128
42 #define IXGBE_82599_MAX_RX_QUEUES 128
43 #define IXGBE_82599_RAR_ENTRIES 128
44 #define IXGBE_82599_MC_TBL_SIZE 128
45 #define IXGBE_82599_VFT_TBL_SIZE 128
46 #define IXGBE_82599_RX_PB_SIZE 512
47
48 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
49 ixgbe_link_speed speed,
50 bool autoneg_wait_to_complete);
51 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
52 static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
53 u16 offset, u16 *data);
54 static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
55 u16 words, u16 *data);
56 static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
57 u8 dev_addr, u8 *data);
58 static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
59 u8 dev_addr, u8 data);
60
ixgbe_init_mac_link_ops_82599(struct ixgbe_hw * hw)61 void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
62 {
63 struct ixgbe_mac_info *mac = &hw->mac;
64
65 DEBUGFUNC("ixgbe_init_mac_link_ops_82599");
66
67 /*
68 * enable the laser control functions for SFP+ fiber
69 * and MNG not enabled
70 */
71 if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
72 !ixgbe_mng_enabled(hw)) {
73 mac->ops.disable_tx_laser =
74 ixgbe_disable_tx_laser_multispeed_fiber;
75 mac->ops.enable_tx_laser =
76 ixgbe_enable_tx_laser_multispeed_fiber;
77 mac->ops.flap_tx_laser = ixgbe_flap_tx_laser_multispeed_fiber;
78
79 } else {
80 mac->ops.disable_tx_laser = NULL;
81 mac->ops.enable_tx_laser = NULL;
82 mac->ops.flap_tx_laser = NULL;
83 }
84
85 if (hw->phy.multispeed_fiber) {
86 /* Set up dual speed SFP+ support */
87 mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber;
88 mac->ops.setup_mac_link = ixgbe_setup_mac_link_82599;
89 mac->ops.set_rate_select_speed =
90 ixgbe_set_hard_rate_select_speed;
91 if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber_fixed)
92 mac->ops.set_rate_select_speed =
93 ixgbe_set_soft_rate_select_speed;
94 } else {
95 if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) &&
96 (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
97 hw->phy.smart_speed == ixgbe_smart_speed_on) &&
98 !ixgbe_verify_lesm_fw_enabled_82599(hw)) {
99 mac->ops.setup_link = ixgbe_setup_mac_link_smartspeed;
100 } else {
101 mac->ops.setup_link = ixgbe_setup_mac_link_82599;
102 }
103 }
104 }
105
106 /**
107 * ixgbe_init_phy_ops_82599 - PHY/SFP specific init
108 * @hw: pointer to hardware structure
109 *
110 * Initialize any function pointers that were not able to be
111 * set during init_shared_code because the PHY/SFP type was
112 * not known. Perform the SFP init if necessary.
113 *
114 **/
s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	s32 ret_val = IXGBE_SUCCESS;
	u32 esdp;

	DEBUGFUNC("ixgbe_init_phy_ops_82599");

	if (hw->device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) {
		/* Store flag indicating I2C bus access control unit. */
		hw->phy.qsfp_shared_i2c_bus = true;

		/* Initialize access to QSFP+ I2C bus: drive SDP0 as an
		 * output (cleared), make SDP1 an input, and take both pins
		 * out of native mode so software controls them — presumably
		 * for shared-bus arbitration with FW; confirm vs datasheet.
		 */
		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		esdp |= IXGBE_ESDP_SDP0_DIR;
		esdp &= ~IXGBE_ESDP_SDP1_DIR;
		esdp &= ~IXGBE_ESDP_SDP0;
		esdp &= ~IXGBE_ESDP_SDP0_NATIVE;
		esdp &= ~IXGBE_ESDP_SDP1_NATIVE;
		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
		IXGBE_WRITE_FLUSH(hw);

		/* Use 82599-specific I2C accessors on the shared QSFP bus */
		phy->ops.read_i2c_byte = ixgbe_read_i2c_byte_82599;
		phy->ops.write_i2c_byte = ixgbe_write_i2c_byte_82599;
	}
	/* Identify the PHY or SFP module */
	ret_val = phy->ops.identify(hw);
	if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto init_phy_ops_out;

	/* Setup function pointers based on detected SFP module and speeds */
	ixgbe_init_mac_link_ops_82599(hw);
	/* A recognized SFP module needs no PHY reset hook */
	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown)
		hw->phy.ops.reset = NULL;

	/* If copper media, overwrite with copper function pointers */
	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
		mac->ops.setup_link = ixgbe_setup_copper_link_82599;
		mac->ops.get_link_capabilities =
			ixgbe_get_copper_link_capabilities_generic;
	}

	/* Set necessary function pointers based on PHY type */
	switch (hw->phy.type) {
	case ixgbe_phy_tn:
		/* TNX copper PHY gets dedicated link and FW-version hooks */
		phy->ops.setup_link = ixgbe_setup_phy_link_tnx;
		phy->ops.check_link = ixgbe_check_phy_link_tnx;
		phy->ops.get_firmware_version =
			ixgbe_get_phy_firmware_version_tnx;
		break;
	default:
		break;
	}
init_phy_ops_out:
	return ret_val;
}
172
/**
 * ixgbe_setup_sfp_modules_82599 - Setup SFP module
 * @hw: pointer to hardware structure
 *
 * Applies the EEPROM-provided init sequence for the detected SFP module
 * and then restarts the DSP in SFI mode.  Returns IXGBE_SUCCESS,
 * IXGBE_ERR_SWFW_SYNC / IXGBE_ERR_SFP_SETUP_NOT_COMPLETE on setup
 * failures, or IXGBE_ERR_PHY when an EEPROM read fails.
 */
s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_SUCCESS;
	u16 list_offset, data_offset, data_value;

	DEBUGFUNC("ixgbe_setup_sfp_modules_82599");

	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
		ixgbe_init_mac_link_ops_82599(hw);

		/* Recognized SFP module: no PHY reset hook needed */
		hw->phy.ops.reset = NULL;

		/* Locate this module's init word list in the EEPROM */
		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
							      &data_offset);
		if (ret_val != IXGBE_SUCCESS)
			goto setup_sfp_out;

		/* PHY config will finish before releasing the semaphore */
		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
							IXGBE_GSSR_MAC_CSR_SM);
		if (ret_val != IXGBE_SUCCESS) {
			ret_val = IXGBE_ERR_SWFW_SYNC;
			goto setup_sfp_out;
		}

		/* Stream the 0xffff-terminated word list (starting one word
		 * past data_offset) into CORECTL.
		 */
		if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
			goto setup_sfp_err;
		while (data_value != 0xffff) {
			IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value);
			IXGBE_WRITE_FLUSH(hw);
			if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
				goto setup_sfp_err;
		}

		/* Release the semaphore */
		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
		/* Delay obtaining semaphore again to allow FW access
		 * prot_autoc_write uses the semaphore too.
		 */
		msec_delay(hw->eeprom.semaphore_delay);

		/* Restart DSP and set SFI mode */
		ret_val = hw->mac.ops.prot_autoc_write(hw,
			hw->mac.orig_autoc | IXGBE_AUTOC_LMS_10G_SERIAL,
			false);

		if (ret_val) {
			DEBUGOUT("sfp module setup not complete\n");
			ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
			goto setup_sfp_out;
		}

	}

setup_sfp_out:
	return ret_val;

setup_sfp_err:
	/* Release the semaphore */
	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
	/* Delay obtaining semaphore again to allow FW access */
	msec_delay(hw->eeprom.semaphore_delay);
	ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
		      "eeprom read at offset %d failed", data_offset);
	return IXGBE_ERR_PHY;
}
239
240 /**
241 * prot_autoc_read_82599 - Hides MAC differences needed for AUTOC read
242 * @hw: pointer to hardware structure
 * @locked: set true if this read acquired the SW/FW semaphore
244 * @reg_val: Value we read from AUTOC
245 *
246 * For this part (82599) we need to wrap read-modify-writes with a possible
247 * FW/SW lock. It is assumed this lock will be freed with the next
248 * prot_autoc_write_82599().
249 */
prot_autoc_read_82599(struct ixgbe_hw * hw,bool * locked,u32 * reg_val)250 s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
251 {
252 s32 ret_val;
253
254 *locked = false;
255 /* If LESM is on then we need to hold the SW/FW semaphore. */
256 if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
257 ret_val = hw->mac.ops.acquire_swfw_sync(hw,
258 IXGBE_GSSR_MAC_CSR_SM);
259 if (ret_val != IXGBE_SUCCESS)
260 return IXGBE_ERR_SWFW_SYNC;
261
262 *locked = true;
263 }
264
265 *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
266 return IXGBE_SUCCESS;
267 }
268
269 /**
270 * prot_autoc_write_82599 - Hides MAC differences needed for AUTOC write
271 * @hw: pointer to hardware structure
272 * @autoc: value to write to AUTOC
273 * @locked: bool to indicate whether the SW/FW lock was already taken by
 * a previous prot_autoc_read_82599.
275 *
276 * This part (82599) may need to hold the SW/FW lock around all writes to
277 * AUTOC. Likewise after a write we need to do a pipeline reset.
278 */
prot_autoc_write_82599(struct ixgbe_hw * hw,u32 autoc,bool locked)279 s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked)
280 {
281 s32 ret_val = IXGBE_SUCCESS;
282
283 /* Blocked by MNG FW so bail */
284 if (ixgbe_check_reset_blocked(hw))
285 goto out;
286
287 /* We only need to get the lock if:
288 * - We didn't do it already (in the read part of a read-modify-write)
289 * - LESM is enabled.
290 */
291 if (!locked && ixgbe_verify_lesm_fw_enabled_82599(hw)) {
292 ret_val = hw->mac.ops.acquire_swfw_sync(hw,
293 IXGBE_GSSR_MAC_CSR_SM);
294 if (ret_val != IXGBE_SUCCESS)
295 return IXGBE_ERR_SWFW_SYNC;
296
297 locked = true;
298 }
299
300 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
301 ret_val = ixgbe_reset_pipeline_82599(hw);
302
303 out:
304 /* Free the SW/FW semaphore as we either grabbed it here or
305 * already had it when this function was called.
306 */
307 if (locked)
308 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
309
310 return ret_val;
311 }
312
313 /**
314 * ixgbe_init_ops_82599 - Inits func ptrs and MAC type
315 * @hw: pointer to hardware structure
316 *
317 * Initialize the function pointers and assign the MAC type for 82599.
318 * Does not touch the hardware.
319 **/
320
s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	s32 ret_val;
	u16 i;

	DEBUGFUNC("ixgbe_init_ops_82599");

	/* Start from the generic tables, then override 82599-specific
	 * entries below; the generic init's status is what we return.
	 */
	ixgbe_init_phy_ops_generic(hw);
	ret_val = ixgbe_init_ops_generic(hw);

	/* PHY */
	phy->ops.identify = ixgbe_identify_phy_82599;
	phy->ops.init = ixgbe_init_phy_ops_82599;

	/* MAC */
	mac->ops.reset_hw = ixgbe_reset_hw_82599;
	mac->ops.enable_relaxed_ordering = ixgbe_enable_relaxed_ordering_gen2;
	mac->ops.get_media_type = ixgbe_get_media_type_82599;
	mac->ops.get_supported_physical_layer =
				    ixgbe_get_supported_physical_layer_82599;
	mac->ops.disable_sec_rx_path = ixgbe_disable_sec_rx_path_generic;
	mac->ops.enable_sec_rx_path = ixgbe_enable_sec_rx_path_generic;
	mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_82599;
	mac->ops.read_analog_reg8 = ixgbe_read_analog_reg8_82599;
	mac->ops.write_analog_reg8 = ixgbe_write_analog_reg8_82599;
	mac->ops.start_hw = ixgbe_start_hw_82599;
	mac->ops.get_san_mac_addr = ixgbe_get_san_mac_addr_generic;
	mac->ops.set_san_mac_addr = ixgbe_set_san_mac_addr_generic;
	mac->ops.get_device_caps = ixgbe_get_device_caps_generic;
	mac->ops.get_wwn_prefix = ixgbe_get_wwn_prefix_generic;
	mac->ops.get_fcoe_boot_status = ixgbe_get_fcoe_boot_status_generic;
	/* AUTOC accesses may need FW/SW semaphore protection on 82599 */
	mac->ops.prot_autoc_read = prot_autoc_read_82599;
	mac->ops.prot_autoc_write = prot_autoc_write_82599;

	/* RAR, Multicast, VLAN */
	mac->ops.set_vmdq = ixgbe_set_vmdq_generic;
	mac->ops.set_vmdq_san_mac = ixgbe_set_vmdq_san_mac_generic;
	mac->ops.clear_vmdq = ixgbe_clear_vmdq_generic;
	mac->ops.insert_mac_addr = ixgbe_insert_mac_addr_generic;
	mac->rar_highwater = 1;
	mac->ops.set_vfta = ixgbe_set_vfta_generic;
	mac->ops.set_vlvf = ixgbe_set_vlvf_generic;
	mac->ops.clear_vfta = ixgbe_clear_vfta_generic;
	mac->ops.init_uta_tables = ixgbe_init_uta_tables_generic;
	mac->ops.setup_sfp = ixgbe_setup_sfp_modules_82599;
	mac->ops.set_mac_anti_spoofing = ixgbe_set_mac_anti_spoofing;
	mac->ops.set_vlan_anti_spoofing = ixgbe_set_vlan_anti_spoofing;

	/* Link */
	mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_82599;
	mac->ops.check_link = ixgbe_check_mac_link_generic;
	mac->ops.setup_rxpba = ixgbe_set_rxpba_generic;
	ixgbe_init_mac_link_ops_82599(hw);

	/* 82599 hardware capacities (queues, filter table sizes) */
	mac->mcft_size = IXGBE_82599_MC_TBL_SIZE;
	mac->vft_size = IXGBE_82599_VFT_TBL_SIZE;
	mac->num_rar_entries = IXGBE_82599_RAR_ENTRIES;
	mac->rx_pb_size = IXGBE_82599_RX_PB_SIZE;
	mac->max_rx_queues = IXGBE_82599_MAX_RX_QUEUES;
	mac->max_tx_queues = IXGBE_82599_MAX_TX_QUEUES;
	mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);

	/* ARC subsystem is present when FWSM reports a manageability mode */
	mac->arc_subsystem_valid = !!(IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw))
				      & IXGBE_FWSM_MODE_MASK);

	/* NOTE(review): 64 mailbox slots — presumably one per VF; confirm
	 * against the mailbox header.
	 */
	for (i = 0; i < 64; i++)
		hw->mbx.ops[i].init_params = ixgbe_init_mbx_params_pf;

	/* EEPROM */
	eeprom->ops.read = ixgbe_read_eeprom_82599;
	eeprom->ops.read_buffer = ixgbe_read_eeprom_buffer_82599;

	/* Manageability interface */
	mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_generic;

	mac->ops.get_thermal_sensor_data =
				    ixgbe_get_thermal_sensor_data_generic;
	mac->ops.init_thermal_sensor_thresh =
				    ixgbe_init_thermal_sensor_thresh_generic;

	mac->ops.bypass_rw = ixgbe_bypass_rw_generic;
	mac->ops.bypass_valid_rd = ixgbe_bypass_valid_rd_generic;
	mac->ops.bypass_set = ixgbe_bypass_set_generic;
	mac->ops.bypass_rd_eep = ixgbe_bypass_rd_eep_generic;

	mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic;

	return ret_val;
}
413
414 /**
415 * ixgbe_get_link_capabilities_82599 - Determines link capabilities
416 * @hw: pointer to hardware structure
417 * @speed: pointer to link speed
418 * @autoneg: true when autoneg or autotry is enabled
419 *
420 * Determines the link capabilities by reading the AUTOC register.
421 **/
ixgbe_get_link_capabilities_82599(struct ixgbe_hw * hw,ixgbe_link_speed * speed,bool * autoneg)422 s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
423 ixgbe_link_speed *speed,
424 bool *autoneg)
425 {
426 s32 status = IXGBE_SUCCESS;
427 u32 autoc = 0;
428
429 DEBUGFUNC("ixgbe_get_link_capabilities_82599");
430
431
432 /* Check if 1G SFP module. */
433 if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
434 hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
435 hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
436 hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
437 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
438 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1 ||
439 hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core0 ||
440 hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core1) {
441 *speed = IXGBE_LINK_SPEED_1GB_FULL;
442 *autoneg = true;
443 goto out;
444 }
445
446 if (hw->phy.sfp_type == ixgbe_sfp_type_da_cu_core0 ||
447 hw->phy.sfp_type == ixgbe_sfp_type_da_cu_core1) {
448 *speed = IXGBE_LINK_SPEED_10GB_FULL;
449 *autoneg = true;
450
451 if (hw->phy.multispeed_fiber)
452 *speed |= IXGBE_LINK_SPEED_1GB_FULL;
453
454 goto out;
455 }
456
457 /*
458 * Determine link capabilities based on the stored value of AUTOC,
459 * which represents EEPROM defaults. If AUTOC value has not
460 * been stored, use the current register values.
461 */
462 if (hw->mac.orig_link_settings_stored)
463 autoc = hw->mac.orig_autoc;
464 else
465 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
466
467 switch (autoc & IXGBE_AUTOC_LMS_MASK) {
468 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
469 *speed = IXGBE_LINK_SPEED_1GB_FULL;
470 *autoneg = false;
471 break;
472
473 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
474 *speed = IXGBE_LINK_SPEED_10GB_FULL;
475 *autoneg = false;
476 break;
477
478 case IXGBE_AUTOC_LMS_1G_AN:
479 *speed = IXGBE_LINK_SPEED_1GB_FULL;
480 *autoneg = true;
481 break;
482
483 case IXGBE_AUTOC_LMS_10G_SERIAL:
484 *speed = IXGBE_LINK_SPEED_10GB_FULL;
485 *autoneg = false;
486 break;
487
488 case IXGBE_AUTOC_LMS_KX4_KX_KR:
489 case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
490 *speed = IXGBE_LINK_SPEED_UNKNOWN;
491 if (autoc & IXGBE_AUTOC_KR_SUPP)
492 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
493 if (autoc & IXGBE_AUTOC_KX4_SUPP)
494 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
495 if (autoc & IXGBE_AUTOC_KX_SUPP)
496 *speed |= IXGBE_LINK_SPEED_1GB_FULL;
497 *autoneg = true;
498 break;
499
500 case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
501 *speed = IXGBE_LINK_SPEED_100_FULL;
502 if (autoc & IXGBE_AUTOC_KR_SUPP)
503 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
504 if (autoc & IXGBE_AUTOC_KX4_SUPP)
505 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
506 if (autoc & IXGBE_AUTOC_KX_SUPP)
507 *speed |= IXGBE_LINK_SPEED_1GB_FULL;
508 *autoneg = true;
509 break;
510
511 case IXGBE_AUTOC_LMS_SGMII_1G_100M:
512 *speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL;
513 *autoneg = false;
514 break;
515
516 default:
517 status = IXGBE_ERR_LINK_SETUP;
518 goto out;
519 break;
520 }
521
522 if (hw->phy.multispeed_fiber) {
523 *speed |= IXGBE_LINK_SPEED_10GB_FULL |
524 IXGBE_LINK_SPEED_1GB_FULL;
525
526 /* QSFP must not enable full auto-negotiation
527 * Limited autoneg is enabled at 1G
528 */
529 if (hw->phy.media_type == ixgbe_media_type_fiber_qsfp)
530 *autoneg = false;
531 else
532 *autoneg = true;
533 }
534
535 out:
536 return status;
537 }
538
539 /**
540 * ixgbe_get_media_type_82599 - Get media type
541 * @hw: pointer to hardware structure
542 *
543 * Returns the media type (fiber, copper, backplane)
544 **/
ixgbe_get_media_type_82599(struct ixgbe_hw * hw)545 enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
546 {
547 enum ixgbe_media_type media_type;
548
549 DEBUGFUNC("ixgbe_get_media_type_82599");
550
551 /* Detect if there is a copper PHY attached. */
552 switch (hw->phy.type) {
553 case ixgbe_phy_cu_unknown:
554 case ixgbe_phy_tn:
555 media_type = ixgbe_media_type_copper;
556 goto out;
557 default:
558 break;
559 }
560
561 switch (hw->device_id) {
562 case IXGBE_DEV_ID_82599_KX4:
563 case IXGBE_DEV_ID_82599_KX4_MEZZ:
564 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
565 case IXGBE_DEV_ID_82599_KR:
566 case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
567 case IXGBE_DEV_ID_82599_XAUI_LOM:
568 /* Default device ID is mezzanine card KX/KX4 */
569 media_type = ixgbe_media_type_backplane;
570 break;
571 case IXGBE_DEV_ID_82599_SFP:
572 case IXGBE_DEV_ID_82599_SFP_FCOE:
573 case IXGBE_DEV_ID_82599_SFP_EM:
574 case IXGBE_DEV_ID_82599_SFP_SF2:
575 case IXGBE_DEV_ID_82599_SFP_SF_QP:
576 case IXGBE_DEV_ID_82599EN_SFP:
577 media_type = ixgbe_media_type_fiber;
578 break;
579 case IXGBE_DEV_ID_82599_CX4:
580 media_type = ixgbe_media_type_cx4;
581 break;
582 case IXGBE_DEV_ID_82599_T3_LOM:
583 media_type = ixgbe_media_type_copper;
584 break;
585 case IXGBE_DEV_ID_82599_LS:
586 media_type = ixgbe_media_type_fiber_lco;
587 break;
588 case IXGBE_DEV_ID_82599_QSFP_SF_QP:
589 media_type = ixgbe_media_type_fiber_qsfp;
590 break;
591 case IXGBE_DEV_ID_82599_BYPASS:
592 media_type = ixgbe_media_type_fiber_fixed;
593 hw->phy.multispeed_fiber = true;
594 break;
595 default:
596 media_type = ixgbe_media_type_unknown;
597 break;
598 }
599 out:
600 return media_type;
601 }
602
603 /**
604 * ixgbe_stop_mac_link_on_d3_82599 - Disables link on D3
605 * @hw: pointer to hardware structure
606 *
607 * Disables link during D3 power down sequence.
608 *
609 **/
ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw * hw)610 void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw)
611 {
612 u32 autoc2_reg;
613 u16 ee_ctrl_2 = 0;
614
615 DEBUGFUNC("ixgbe_stop_mac_link_on_d3_82599");
616 ixgbe_read_eeprom(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2);
617
618 if (!ixgbe_mng_present(hw) && !hw->wol_enabled &&
619 ee_ctrl_2 & IXGBE_EEPROM_CCD_BIT) {
620 autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
621 autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK;
622 IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
623 }
624 }
625
626 /**
627 * ixgbe_start_mac_link_82599 - Setup MAC link settings
628 * @hw: pointer to hardware structure
629 * @autoneg_wait_to_complete: true when waiting for completion is needed
630 *
631 * Configures link settings based on values in the ixgbe_hw struct.
632 * Restarts the link. Performs autonegotiation if needed.
633 **/
s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
			       bool autoneg_wait_to_complete)
{
	u32 autoc_reg;
	u32 links_reg;
	u32 i;
	s32 status = IXGBE_SUCCESS;
	bool got_lock = false;

	DEBUGFUNC("ixgbe_start_mac_link_82599");


	/* reset_pipeline requires us to hold this lock as it writes to
	 * AUTOC.
	 */
	if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
		status = hw->mac.ops.acquire_swfw_sync(hw,
						       IXGBE_GSSR_MAC_CSR_SM);
		if (status != IXGBE_SUCCESS)
			goto out;

		got_lock = true;
	}

	/* Restart link */
	ixgbe_reset_pipeline_82599(hw);

	if (got_lock)
		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);

	/* Only poll for autoneg to complete if specified to do so */
	if (autoneg_wait_to_complete) {
		autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
		/* Polling only applies in the KX4/KX/KR link modes, where
		 * autoneg completion is reported in LINKS.
		 */
		if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		    IXGBE_AUTOC_LMS_KX4_KX_KR ||
		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		    IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		    IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
			links_reg = 0; /* Just in case Autoneg time = 0 */
			/* Up to IXGBE_AUTO_NEG_TIME iterations, 100ms apart */
			for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
				if (links_reg & IXGBE_LINKS_KX_AN_COMP)
					break;
				msec_delay(100);
			}
			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
				status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
				DEBUGOUT("Autoneg did not complete.\n");
			}
		}
	}

	/* Add delay to filter out noises during initial link setup */
	msec_delay(50);

out:
	return status;
}
693
694 /**
695 * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
696 * @hw: pointer to hardware structure
697 *
698 * The base drivers may require better control over SFP+ module
699 * PHY states. This includes selectively shutting down the Tx
700 * laser on the PHY, effectively halting physical link.
701 **/
ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw * hw)702 void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
703 {
704 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
705
706 /* Blocked by MNG FW so bail */
707 if (ixgbe_check_reset_blocked(hw))
708 return;
709
710 /* Disable Tx laser; allow 100us to go dark per spec */
711 esdp_reg |= IXGBE_ESDP_SDP3;
712 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
713 IXGBE_WRITE_FLUSH(hw);
714 usec_delay(100);
715 }
716
717 /**
718 * ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser
719 * @hw: pointer to hardware structure
720 *
721 * The base drivers may require better control over SFP+ module
722 * PHY states. This includes selectively turning on the Tx
723 * laser on the PHY, effectively starting physical link.
724 **/
ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw * hw)725 void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
726 {
727 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
728
729 /* Enable Tx laser; allow 100ms to light up */
730 esdp_reg &= ~IXGBE_ESDP_SDP3;
731 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
732 IXGBE_WRITE_FLUSH(hw);
733 msec_delay(100);
734 }
735
736 /**
737 * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser
738 * @hw: pointer to hardware structure
739 *
740 * When the driver changes the link speeds that it can support,
741 * it sets autotry_restart to true to indicate that we need to
742 * initiate a new autotry session with the link partner. To do
743 * so, we set the speed then disable and re-enable the Tx laser, to
744 * alert the link partner that it also needs to restart autotry on its
745 * end. This is consistent with true clause 37 autoneg, which also
746 * involves a loss of signal.
747 **/
ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw * hw)748 void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
749 {
750 DEBUGFUNC("ixgbe_flap_tx_laser_multispeed_fiber");
751
752 /* Blocked by MNG FW so bail */
753 if (ixgbe_check_reset_blocked(hw))
754 return;
755
756 if (hw->mac.autotry_restart) {
757 ixgbe_disable_tx_laser_multispeed_fiber(hw);
758 ixgbe_enable_tx_laser_multispeed_fiber(hw);
759 hw->mac.autotry_restart = false;
760 }
761 }
762
763 /**
764 * ixgbe_set_hard_rate_select_speed - Set module link speed
765 * @hw: pointer to hardware structure
766 * @speed: link speed to set
767 *
768 * Set module link speed via RS0/RS1 rate select pins.
769 */
void ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *hw,
				      ixgbe_link_speed speed)
{
	u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);

	/* SDP5 is driven as an output; its level selects the module rate */
	if (speed == IXGBE_LINK_SPEED_10GB_FULL) {
		esdp |= IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5;
	} else if (speed == IXGBE_LINK_SPEED_1GB_FULL) {
		esdp &= ~IXGBE_ESDP_SDP5;
		esdp |= IXGBE_ESDP_SDP5_DIR;
	} else {
		DEBUGOUT("Invalid fixed module speed\n");
		return;
	}

	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
	IXGBE_WRITE_FLUSH(hw);
}
791
792 /**
793 * ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
794 * @hw: pointer to hardware structure
795 * @speed: new link speed
796 * @autoneg_wait_to_complete: true when waiting for completion is needed
797 *
798 * Implements the Intel SmartSpeed algorithm.
799 **/
s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
				    ixgbe_link_speed speed,
				    bool autoneg_wait_to_complete)
{
	s32 status = IXGBE_SUCCESS;
	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	s32 i, j;
	bool link_up = false;
	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);

	DEBUGFUNC("ixgbe_setup_mac_link_smartspeed");

	/* Set autoneg_advertised value based on input link speed */
	hw->phy.autoneg_advertised = 0;

	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;

	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;

	if (speed & IXGBE_LINK_SPEED_100_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;

	/*
	 * Implement Intel SmartSpeed algorithm.  SmartSpeed will reduce the
	 * autoneg advertisement if link is unable to be established at the
	 * highest negotiated rate.  This can sometimes happen due to integrity
	 * issues with the physical media connection.
	 */

	/* First, try to get link with full advertisement */
	hw->phy.smart_speed_active = false;
	for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
		status = ixgbe_setup_mac_link_82599(hw, speed,
						    autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			goto out;

		/*
		 * Wait for the controller to acquire link.  Per IEEE 802.3ap,
		 * Section 73.10.2, we may have to wait up to 500ms if KR is
		 * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per
		 * Table 9 in the AN MAS.
		 */
		for (i = 0; i < 5; i++) {
			msec_delay(100);

			/* If we have link, just jump out */
			status = ixgbe_check_link(hw, &link_speed, &link_up,
						  false);
			if (status != IXGBE_SUCCESS)
				goto out;

			if (link_up)
				goto out;
		}
	}

	/*
	 * We didn't get link.  If we advertised KR plus one of KX4/KX
	 * (or BX4/BX), then disable KR and try again.
	 */
	if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) ||
	    ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0))
		goto out;

	/* Turn SmartSpeed on to disable KR support */
	hw->phy.smart_speed_active = true;
	status = ixgbe_setup_mac_link_82599(hw, speed,
					    autoneg_wait_to_complete);
	if (status != IXGBE_SUCCESS)
		goto out;

	/*
	 * Wait for the controller to acquire link.  600ms will allow for
	 * the AN link_fail_inhibit_timer as well for multiple cycles of
	 * parallel detect, both 10g and 1g.  This allows for the maximum
	 * connect attempts as defined in the AN MAS table 73-7.
	 */
	for (i = 0; i < 6; i++) {
		msec_delay(100);

		/* If we have link, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, false);
		if (status != IXGBE_SUCCESS)
			goto out;

		if (link_up)
			goto out;
	}

	/* We didn't get link.  Turn SmartSpeed back off. */
	hw->phy.smart_speed_active = false;
	status = ixgbe_setup_mac_link_82599(hw, speed,
					    autoneg_wait_to_complete);

out:
	/* Report when SmartSpeed settled below the maximum advertised rate */
	if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
		DEBUGOUT("Smartspeed has downgraded the link speed "
			 "from the maximum advertised\n");
	return status;
}
903
/**
 * ixgbe_setup_mac_link_82599 - Set MAC link speed
 * @hw: pointer to hardware structure
 * @speed: new link speed (bitmask of IXGBE_LINK_SPEED_* values)
 * @autoneg_wait_to_complete: true when waiting for completion is needed
 *
 * Set the link speed in the AUTOC register and restarts link.
 * Returns IXGBE_SUCCESS, IXGBE_ERR_LINK_SETUP when the requested speed is
 * not in the device's capabilities, IXGBE_ERR_AUTONEG_NOT_COMPLETE when
 * autoneg polling times out, or an error from prot_autoc_write().
 **/
s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
			       ixgbe_link_speed speed,
			       bool autoneg_wait_to_complete)
{
	bool autoneg = false;
	s32 status = IXGBE_SUCCESS;
	u32 pma_pmd_1g, link_mode;
	u32 current_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); /* holds the value of AUTOC register at this current point in time */
	u32 orig_autoc = 0; /* holds the cached value of AUTOC register */
	u32 autoc = current_autoc; /* Temporary variable used for comparison purposes */
	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
	u32 links_reg;
	u32 i;
	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;

	DEBUGFUNC("ixgbe_setup_mac_link_82599");

	/* Check to see if speed passed in is supported. */
	status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
	if (status)
		goto out;

	speed &= link_capabilities;

	/* Nothing left after masking means the request is unsupported */
	if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
		status = IXGBE_ERR_LINK_SETUP;
		goto out;
	}

	/* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
	if (hw->mac.orig_link_settings_stored)
		orig_autoc = hw->mac.orig_autoc;
	else
		orig_autoc = autoc;

	link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
	pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;

	if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
		/* Set KX4/KX/KR support according to speed requested */
		autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
		if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
			if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
				autoc |= IXGBE_AUTOC_KX4_SUPP;
			/* KR is only advertised while SmartSpeed is not
			 * actively downshifting the link */
			if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
			    (hw->phy.smart_speed_active == false))
				autoc |= IXGBE_AUTOC_KR_SUPP;
		}
		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
			autoc |= IXGBE_AUTOC_KX_SUPP;
	} else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
		   (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
		    link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
		/* Switch from 1G SFI to 10G SFI if requested */
		if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
		    (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
			autoc &= ~IXGBE_AUTOC_LMS_MASK;
			autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
		}
	} else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
		   (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
		/* Switch from 10G SFI to 1G SFI if requested */
		if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
		    (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
			autoc &= ~IXGBE_AUTOC_LMS_MASK;
			if (autoneg || hw->phy.type == ixgbe_phy_qsfp_intel)
				autoc |= IXGBE_AUTOC_LMS_1G_AN;
			else
				autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
		}
	}

	/* Only touch hardware if the computed AUTOC actually changed */
	if (autoc != current_autoc) {
		/* Restart link */
		status = hw->mac.ops.prot_autoc_write(hw, autoc, false);
		if (status != IXGBE_SUCCESS)
			goto out;

		/* Only poll for autoneg to complete if specified to do so */
		if (autoneg_wait_to_complete) {
			if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
				links_reg = 0; /*Just in case Autoneg time=0*/
				for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
					links_reg =
					       IXGBE_READ_REG(hw, IXGBE_LINKS);
					if (links_reg & IXGBE_LINKS_KX_AN_COMP)
						break;
					msec_delay(100);
				}
				if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
					status =
						IXGBE_ERR_AUTONEG_NOT_COMPLETE;
					DEBUGOUT("Autoneg did not complete.\n");
				}
			}
		}

		/* Add delay to filter out noises during initial link setup */
		msec_delay(50);
	}

out:
	return status;
}
1021
1022 /**
1023 * ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field
1024 * @hw: pointer to hardware structure
1025 * @speed: new link speed
1026 * @autoneg_wait_to_complete: true if waiting is needed to complete
1027 *
1028 * Restarts link on PHY and MAC based on settings passed in.
1029 **/
ixgbe_setup_copper_link_82599(struct ixgbe_hw * hw,ixgbe_link_speed speed,bool autoneg_wait_to_complete)1030 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
1031 ixgbe_link_speed speed,
1032 bool autoneg_wait_to_complete)
1033 {
1034 s32 status;
1035
1036 DEBUGFUNC("ixgbe_setup_copper_link_82599");
1037
1038 /* Setup the PHY according to input speed */
1039 status = hw->phy.ops.setup_link_speed(hw, speed,
1040 autoneg_wait_to_complete);
1041 /* Set up MAC */
1042 ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);
1043
1044 return status;
1045 }
1046
/**
 * ixgbe_reset_hw_82599 - Perform hardware reset
 * @hw: pointer to hardware structure
 *
 * Resets the hardware by resetting the transmit and receive units, masks
 * and clears all interrupts, perform a PHY reset, and perform a link (MAC)
 * reset.
 **/
s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
{
	ixgbe_link_speed link_speed;
	s32 status;
	u32 ctrl = 0;
	u32 i, autoc, autoc2;
	u32 curr_lms;
	bool link_up = false;

	DEBUGFUNC("ixgbe_reset_hw_82599");

	/* Call adapter stop to disable tx/rx and clear interrupts */
	status = hw->mac.ops.stop_adapter(hw);
	if (status != IXGBE_SUCCESS)
		goto reset_hw_out;

	/* flush pending Tx transactions */
	ixgbe_clear_tx_pending(hw);

	/* PHY ops must be identified and initialized prior to reset */

	/* Identify PHY and related function pointers */
	status = hw->phy.ops.init(hw);

	/* Only an unsupported SFP aborts here; other init errors fall
	 * through so the MAC reset still happens */
	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto reset_hw_out;

	/* Setup SFP module if there is one present. */
	if (hw->phy.sfp_setup_needed) {
		status = hw->mac.ops.setup_sfp(hw);
		hw->phy.sfp_setup_needed = false;
	}

	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto reset_hw_out;

	/* Reset PHY */
	if (hw->phy.reset_disable == false && hw->phy.ops.reset != NULL)
		hw->phy.ops.reset(hw);

	/* remember AUTOC from before we reset */
	curr_lms = IXGBE_READ_REG(hw, IXGBE_AUTOC) & IXGBE_AUTOC_LMS_MASK;

mac_reset_top:
	/*
	 * Issue global reset to the MAC.  Needs to be SW reset if link is up.
	 * If link reset is used when link is up, it might reset the PHY when
	 * mng is using it.  If link is down or the flag to force full link
	 * reset is set, then perform link reset.
	 */
	ctrl = IXGBE_CTRL_LNK_RST;
	if (!hw->force_full_reset) {
		hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
		if (link_up)
			ctrl = IXGBE_CTRL_RST;
	}

	ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
	IXGBE_WRITE_FLUSH(hw);

	/* Poll for reset bit to self-clear meaning reset is complete */
	for (i = 0; i < 10; i++) {
		usec_delay(1);
		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		if (!(ctrl & IXGBE_CTRL_RST_MASK))
			break;
	}

	/* A poll timeout records the error but continues with the rest of
	 * the init sequence */
	if (ctrl & IXGBE_CTRL_RST_MASK) {
		status = IXGBE_ERR_RESET_FAILED;
		DEBUGOUT("Reset polling failed to complete.\n");
	}

	msec_delay(50);

	/*
	 * Double resets are required for recovery from certain error
	 * conditions.  Between resets, it is necessary to stall to
	 * allow time for any pending HW events to complete.
	 */
	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
		goto mac_reset_top;
	}

	/*
	 * Store the original AUTOC/AUTOC2 values if they have not been
	 * stored off yet.  Otherwise restore the stored original
	 * values since the reset operation sets back to defaults.
	 */
	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);

	/* Enable link if disabled in NVM */
	if (autoc2 & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
		autoc2 &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
		IXGBE_WRITE_FLUSH(hw);
	}

	if (hw->mac.orig_link_settings_stored == false) {
		hw->mac.orig_autoc = autoc;
		hw->mac.orig_autoc2 = autoc2;
		hw->mac.orig_link_settings_stored = true;
	} else {

		/* If MNG FW is running on a multi-speed device that
		 * doesn't autoneg without driver support we need to
		 * leave LMS in the state it was before the MAC reset.
		 * Likewise if we support WoL we don't want to change
		 * the LMS state.
		 */
		if ((hw->phy.multispeed_fiber && ixgbe_mng_enabled(hw)) ||
		    hw->wol_enabled)
			hw->mac.orig_autoc =
				(hw->mac.orig_autoc & ~IXGBE_AUTOC_LMS_MASK) |
				curr_lms;

		if (autoc != hw->mac.orig_autoc) {
			status = hw->mac.ops.prot_autoc_write(hw,
							hw->mac.orig_autoc,
							false);
			if (status != IXGBE_SUCCESS)
				goto reset_hw_out;
		}

		/* Only the upper AUTOC2 bits are restored here */
		if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
		    (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
			autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
			autoc2 |= (hw->mac.orig_autoc2 &
				   IXGBE_AUTOC2_UPPER_MASK);
			IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
		}
	}

	/* Store the permanent mac address */
	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

	/*
	 * Store MAC address from RAR0, clear receive address registers, and
	 * clear the multicast table.  Also reset num_rar_entries to 128,
	 * since we modify this value when programming the SAN MAC address.
	 */
	hw->mac.num_rar_entries = 128;
	hw->mac.ops.init_rx_addrs(hw);

	/* Store the permanent SAN mac address */
	hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);

	/* Add the SAN MAC address to the RAR only if it's a valid address */
	if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
		/* Save the SAN MAC RAR index */
		hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;

		hw->mac.ops.set_rar(hw, hw->mac.san_mac_rar_index,
				    hw->mac.san_addr, 0, IXGBE_RAH_AV);

		/* clear VMDq pool/queue selection for this RAR */
		hw->mac.ops.clear_vmdq(hw, hw->mac.san_mac_rar_index,
				       IXGBE_CLEAR_VMDQ_ALL);

		/* Reserve the last RAR for the SAN MAC address */
		hw->mac.num_rar_entries--;
	}

	/* Store the alternative WWNN/WWPN prefix */
	hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
				   &hw->mac.wwpn_prefix);

reset_hw_out:
	return status;
}
1228
1229 /**
1230 * ixgbe_fdir_check_cmd_complete - poll to check whether FDIRCMD is complete
1231 * @hw: pointer to hardware structure
1232 * @fdircmd: current value of FDIRCMD register
1233 */
ixgbe_fdir_check_cmd_complete(struct ixgbe_hw * hw,u32 * fdircmd)1234 static s32 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd)
1235 {
1236 int i;
1237
1238 for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
1239 *fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
1240 if (!(*fdircmd & IXGBE_FDIRCMD_CMD_MASK))
1241 return IXGBE_SUCCESS;
1242 usec_delay(10);
1243 }
1244
1245 return IXGBE_ERR_FDIR_CMD_INCOMPLETE;
1246 }
1247
/**
 * ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
 * @hw: pointer to hardware structure
 *
 * Drains and re-initializes the Flow Director filter tables, applying the
 * 82599 errata workaround (CLEARHT toggle) before re-writing FDIRCTRL.
 * Returns IXGBE_SUCCESS, or an error if a previous FDIRCMD command never
 * completed or the init-done poll times out.
 **/
s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
{
	s32 err;
	int i;
	/* Re-use the current FDIRCTRL value, with INIT_DONE cleared so the
	 * poll below observes the fresh init cycle */
	u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
	u32 fdircmd;
	fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;

	DEBUGFUNC("ixgbe_reinit_fdir_tables_82599");

	/*
	 * Before starting reinitialization process,
	 * FDIRCMD.CMD must be zero.
	 */
	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
	if (err) {
		DEBUGOUT("Flow Director previous command did not complete, aborting table re-initialization.\n");
		return err;
	}

	IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
	IXGBE_WRITE_FLUSH(hw);
	/*
	 * 82599 adapters flow director init flow cannot be restarted,
	 * Workaround 82599 silicon errata by performing the following steps
	 * before re-writing the FDIRCTRL control register with the same value.
	 * - write 1 to bit 8 of FDIRCMD register &
	 * - write 0 to bit 8 of FDIRCMD register
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
			 IXGBE_FDIRCMD_CLEARHT));
	IXGBE_WRITE_FLUSH(hw);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
			 ~IXGBE_FDIRCMD_CLEARHT));
	IXGBE_WRITE_FLUSH(hw);
	/*
	 * Clear FDIR Hash register to clear any leftover hashes
	 * waiting to be programmed.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00);
	IXGBE_WRITE_FLUSH(hw);

	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
	IXGBE_WRITE_FLUSH(hw);

	/* Poll init-done after we write FDIRCTRL register */
	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
				   IXGBE_FDIRCTRL_INIT_DONE)
			break;
		msec_delay(1);
	}
	if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
		DEBUGOUT("Flow Director Signature poll time exceeded!\n");
		return IXGBE_ERR_FDIR_REINIT_FAILED;
	}

	/* Clear FDIR statistics registers (read to clear) */
	IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
	IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT);
	IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
	IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
	IXGBE_READ_REG(hw, IXGBE_FDIRLEN);

	return IXGBE_SUCCESS;
}
1320
1321 /**
1322 * ixgbe_fdir_enable_82599 - Initialize Flow Director control registers
1323 * @hw: pointer to hardware structure
1324 * @fdirctrl: value to write to flow director control register
1325 **/
ixgbe_fdir_enable_82599(struct ixgbe_hw * hw,u32 fdirctrl)1326 static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
1327 {
1328 int i;
1329
1330 DEBUGFUNC("ixgbe_fdir_enable_82599");
1331
1332 /* Prime the keys for hashing */
1333 IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
1334 IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
1335
1336 /*
1337 * Poll init-done after we write the register. Estimated times:
1338 * 10G: PBALLOC = 11b, timing is 60us
1339 * 1G: PBALLOC = 11b, timing is 600us
1340 * 100M: PBALLOC = 11b, timing is 6ms
1341 *
1342 * Multiple these timings by 4 if under full Rx load
1343 *
1344 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
1345 * 1 msec per poll time. If we're at line rate and drop to 100M, then
1346 * this might not finish in our poll time, but we can live with that
1347 * for now.
1348 */
1349 IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
1350 IXGBE_WRITE_FLUSH(hw);
1351 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
1352 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
1353 IXGBE_FDIRCTRL_INIT_DONE)
1354 break;
1355 msec_delay(1);
1356 }
1357
1358 if (i >= IXGBE_FDIR_INIT_DONE_POLL)
1359 DEBUGOUT("Flow Director poll time exceeded!\n");
1360 }
1361
1362 /**
1363 * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
1364 * @hw: pointer to hardware structure
1365 * @fdirctrl: value to write to flow director control register, initially
1366 * contains just the value of the Rx packet buffer allocation
1367 **/
ixgbe_init_fdir_signature_82599(struct ixgbe_hw * hw,u32 fdirctrl)1368 s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
1369 {
1370 DEBUGFUNC("ixgbe_init_fdir_signature_82599");
1371
1372 /*
1373 * Continue setup of fdirctrl register bits:
1374 * Move the flexible bytes to use the ethertype - shift 6 words
1375 * Set the maximum length per hash bucket to 0xA filters
1376 * Send interrupt when 64 filters are left
1377 */
1378 fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
1379 (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
1380 (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
1381
1382 /* write hashes and fdirctrl register, poll for completion */
1383 ixgbe_fdir_enable_82599(hw, fdirctrl);
1384
1385 return IXGBE_SUCCESS;
1386 }
1387
1388 /**
1389 * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
1390 * @hw: pointer to hardware structure
1391 * @fdirctrl: value to write to flow director control register, initially
1392 * contains just the value of the Rx packet buffer allocation
1393 * @cloud_mode: true - cloud mode, false - other mode
1394 **/
ixgbe_init_fdir_perfect_82599(struct ixgbe_hw * hw,u32 fdirctrl,bool cloud_mode)1395 s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl,
1396 bool cloud_mode)
1397 {
1398 UNREFERENCED_1PARAMETER(cloud_mode);
1399 DEBUGFUNC("ixgbe_init_fdir_perfect_82599");
1400
1401 /*
1402 * Continue setup of fdirctrl register bits:
1403 * Turn perfect match filtering on
1404 * Report hash in RSS field of Rx wb descriptor
1405 * Initialize the drop queue to queue 127
1406 * Move the flexible bytes to use the ethertype - shift 6 words
1407 * Set the maximum length per hash bucket to 0xA filters
1408 * Send interrupt when 64 (0x4 * 16) filters are left
1409 */
1410 fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH |
1411 IXGBE_FDIRCTRL_REPORT_STATUS |
1412 (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) |
1413 (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
1414 (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
1415 (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
1416
1417 if (cloud_mode)
1418 fdirctrl |=(IXGBE_FDIRCTRL_FILTERMODE_CLOUD <<
1419 IXGBE_FDIRCTRL_FILTERMODE_SHIFT);
1420
1421 /* write hashes and fdirctrl register, poll for completion */
1422 ixgbe_fdir_enable_82599(hw, fdirctrl);
1423
1424 return IXGBE_SUCCESS;
1425 }
1426
1427 /**
1428 * ixgbe_set_fdir_drop_queue_82599 - Set Flow Director drop queue
1429 * @hw: pointer to hardware structure
1430 * @dropqueue: Rx queue index used for the dropped packets
1431 **/
ixgbe_set_fdir_drop_queue_82599(struct ixgbe_hw * hw,u8 dropqueue)1432 void ixgbe_set_fdir_drop_queue_82599(struct ixgbe_hw *hw, u8 dropqueue)
1433 {
1434 u32 fdirctrl;
1435
1436 DEBUGFUNC("ixgbe_set_fdir_drop_queue_82599");
1437 /* Clear init done bit and drop queue field */
1438 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
1439 fdirctrl &= ~(IXGBE_FDIRCTRL_DROP_Q_MASK | IXGBE_FDIRCTRL_INIT_DONE);
1440
1441 /* Set drop queue */
1442 fdirctrl |= (dropqueue << IXGBE_FDIRCTRL_DROP_Q_SHIFT);
1443 if ((hw->mac.type == ixgbe_mac_X550) ||
1444 (hw->mac.type == ixgbe_mac_X550EM_x) ||
1445 (hw->mac.type == ixgbe_mac_X550EM_a))
1446 fdirctrl |= IXGBE_FDIRCTRL_DROP_NO_MATCH;
1447
1448 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
1449 (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
1450 IXGBE_FDIRCMD_CLEARHT));
1451 IXGBE_WRITE_FLUSH(hw);
1452 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
1453 (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
1454 ~IXGBE_FDIRCMD_CLEARHT));
1455 IXGBE_WRITE_FLUSH(hw);
1456
1457 /* write hashes and fdirctrl register, poll for completion */
1458 ixgbe_fdir_enable_82599(hw, fdirctrl);
1459 }
1460
/*
 * These defines allow us to quickly generate all of the necessary instructions
 * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION
 * for values 0 through 15
 */
/* Key bits common to both the bucket and signature hash keys */
#define IXGBE_ATR_COMMON_HASH_KEY \
		(IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY)
/*
 * Process bit _n (from lo_hash_dword) and bit _n+16 (from hi_hash_dword),
 * folding them into common_hash, bucket_hash or sig_hash depending on which
 * key(s) select that bit.  Expects the locals common_hash, bucket_hash,
 * sig_hash, lo_hash_dword and hi_hash_dword to exist in the calling scope.
 */
#define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
do { \
	u32 n = (_n); \
	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
		common_hash ^= lo_hash_dword >> n; \
	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
		bucket_hash ^= lo_hash_dword >> n; \
	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
		sig_hash ^= lo_hash_dword << (16 - n); \
	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
		common_hash ^= hi_hash_dword >> n; \
	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
		bucket_hash ^= hi_hash_dword >> n; \
	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
		sig_hash ^= hi_hash_dword << (16 - n); \
} while (0)
1484
/**
 * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
 * @input: input bitstream to compute the hash on
 * @common: compressed common input dword
 *
 * This function is almost identical to the function above but contains
 * several optimizations such as unwinding all of the loops, letting the
 * compiler work out all of the conditional ifs since the keys are static
 * defines, and computing two keys at once since the hashed dword stream
 * will be the same for both keys.
 *
 * Returns the 16-bit signature hash in the upper half combined with the
 * 15-bit bucket hash in the lower half (both masked by
 * IXGBE_ATR_HASH_MASK).
 **/
u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
				     union ixgbe_atr_hash_dword common)
{
	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
	u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;

	/* record the flow_vm_vlan bits as they are a key part to the hash */
	flow_vm_vlan = IXGBE_NTOHL(input.dword);

	/* generate common hash dword */
	hi_hash_dword = IXGBE_NTOHL(common.dword);

	/* low dword is word swapped version of common */
	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);

	/* apply flow ID/VM pool/VLAN ID bits to hash words */
	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);

	/* Process bits 0 and 16 */
	IXGBE_COMPUTE_SIG_HASH_ITERATION(0);

	/*
	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
	 * delay this because bit 0 of the stream should not be processed
	 * so we do not add the VLAN until after bit 0 was processed
	 */
	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);

	/* Process remaining 30 bit of the key (deliberately unrolled so the
	 * compiler can fold away every key-bit conditional) */
	IXGBE_COMPUTE_SIG_HASH_ITERATION(1);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(2);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(3);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(4);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(5);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(6);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(7);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(8);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(9);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(10);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(11);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(12);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(13);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(14);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(15);

	/* combine common_hash result with signature and bucket hashes */
	bucket_hash ^= common_hash;
	bucket_hash &= IXGBE_ATR_HASH_MASK;

	sig_hash ^= common_hash << 16;
	sig_hash &= IXGBE_ATR_HASH_MASK << 16;

	/* return completed signature hash */
	return sig_hash ^ bucket_hash;
}
1551
1552 /**
1553 * ixgbe_fdir_add_signature_filter_82599 - Adds a signature hash filter
1554 * @hw: pointer to hardware structure
1555 * @input: unique input dword
1556 * @common: compressed common input dword
1557 * @queue: queue index to direct traffic to
1558 *
1559 * Note that the tunnel bit in input must not be set when the hardware
1560 * tunneling support does not exist.
1561 **/
ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw * hw,union ixgbe_atr_hash_dword input,union ixgbe_atr_hash_dword common,u8 queue)1562 void ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
1563 union ixgbe_atr_hash_dword input,
1564 union ixgbe_atr_hash_dword common,
1565 u8 queue)
1566 {
1567 u64 fdirhashcmd;
1568 u8 flow_type;
1569 bool tunnel;
1570 u32 fdircmd;
1571
1572 DEBUGFUNC("ixgbe_fdir_add_signature_filter_82599");
1573
1574 /*
1575 * Get the flow_type in order to program FDIRCMD properly
1576 * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6
1577 * fifth is FDIRCMD.TUNNEL_FILTER
1578 */
1579 tunnel = !!(input.formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK);
1580 flow_type = input.formatted.flow_type &
1581 (IXGBE_ATR_L4TYPE_TUNNEL_MASK - 1);
1582 switch (flow_type) {
1583 case IXGBE_ATR_FLOW_TYPE_TCPV4:
1584 case IXGBE_ATR_FLOW_TYPE_UDPV4:
1585 case IXGBE_ATR_FLOW_TYPE_SCTPV4:
1586 case IXGBE_ATR_FLOW_TYPE_TCPV6:
1587 case IXGBE_ATR_FLOW_TYPE_UDPV6:
1588 case IXGBE_ATR_FLOW_TYPE_SCTPV6:
1589 break;
1590 default:
1591 DEBUGOUT(" Error on flow type input\n");
1592 return;
1593 }
1594
1595 /* configure FDIRCMD register */
1596 fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
1597 IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
1598 fdircmd |= (u32)flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
1599 fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
1600 if (tunnel)
1601 fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER;
1602
1603 /*
1604 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
1605 * is for FDIRCMD. Then do a 64-bit register write from FDIRHASH.
1606 */
1607 fdirhashcmd = (u64)fdircmd << 32;
1608 fdirhashcmd |= (u64)ixgbe_atr_compute_sig_hash_82599(input, common);
1609 IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
1610
1611 DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
1612
1613 return;
1614 }
1615
/*
 * Fold bit _n (from lo_hash_dword) and bit _n+16 (from hi_hash_dword) into
 * bucket_hash whenever the bucket hash key selects that bit.  Expects the
 * locals bucket_hash, lo_hash_dword and hi_hash_dword to exist in the
 * calling scope.
 */
#define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
do { \
	u32 n = (_n); \
	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
		bucket_hash ^= lo_hash_dword >> n; \
	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
		bucket_hash ^= hi_hash_dword >> n; \
} while (0)
1624
/**
 * ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
 * @input: input bitstream to compute the hash on
 * @input_mask: mask for the input bitstream
 *
 * This function serves two main purposes.  First it applies the input_mask
 * to the atr_input resulting in a cleaned up atr_input data stream.
 * Secondly it computes the hash and stores it in the bkt_hash field at
 * the end of the input byte stream.  This way it will be available for
 * future use without needing to recompute the hash.
 **/
void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
					  union ixgbe_atr_input *input_mask)
{

	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
	u32 bucket_hash = 0;
	u32 hi_dword = 0;
	u32 i = 0;

	/* Apply masks to the first 14 dwords of input data in place */
	for (i = 0; i < 14; i++)
		input->dword_stream[i] &= input_mask->dword_stream[i];

	/* record the flow_vm_vlan bits as they are a key part to the hash */
	flow_vm_vlan = IXGBE_NTOHL(input->dword_stream[0]);

	/* generate common hash dword by XOR-folding dwords 1..13 */
	for (i = 1; i <= 13; i++)
		hi_dword ^= input->dword_stream[i];
	hi_hash_dword = IXGBE_NTOHL(hi_dword);

	/* low dword is word swapped version of common */
	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);

	/* apply flow ID/VM pool/VLAN ID bits to hash words */
	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);

	/* Process bits 0 and 16 */
	IXGBE_COMPUTE_BKT_HASH_ITERATION(0);

	/*
	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
	 * delay this because bit 0 of the stream should not be processed
	 * so we do not add the VLAN until after bit 0 was processed
	 */
	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);

	/* Process remaining 30 bit of the key */
	for (i = 1; i <= 15; i++)
		IXGBE_COMPUTE_BKT_HASH_ITERATION(i);

	/*
	 * Limit hash to 13 bits since max bucket count is 8K.
	 * Store result at the end of the input stream.
	 */
	input->formatted.bkt_hash = bucket_hash & 0x1FFF;
}
1683
1684 /**
1685 * ixgbe_get_fdirtcpm_82599 - generate a TCP port from atr_input_masks
1686 * @input_mask: mask to be bit swapped
1687 *
1688 * The source and destination port masks for flow director are bit swapped
1689 * in that bit 15 effects bit 0, 14 effects 1, 13, 2 etc. In order to
1690 * generate a correctly swapped value we need to bit swap the mask and that
1691 * is what is accomplished by this function.
1692 **/
ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input * input_mask)1693 static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
1694 {
1695 u32 mask = IXGBE_NTOHS(input_mask->formatted.dst_port);
1696 mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
1697 mask |= (u32)IXGBE_NTOHS(input_mask->formatted.src_port);
1698 mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
1699 mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
1700 mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
1701 return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
1702 }
1703
/*
 * These two macros are meant to address the fact that we have registers
 * that are either all or in part big-endian.  As a result on big-endian
 * systems we will end up byte swapping the value to little-endian before
 * it is byte swapped again and written to the hardware in the original
 * big-endian format.
 */
/* Reverse the byte order of a 32-bit value */
#define IXGBE_STORE_AS_BE32(_value) \
	(((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
	 (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))

/* Write a register whose contents the hardware treats as big-endian */
#define IXGBE_WRITE_REG_BE32(a, reg, value) \
	IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(IXGBE_NTOHL(value)))

/* Swap the two bytes of a 16-bit value, then apply IXGBE_NTOHS */
#define IXGBE_STORE_AS_BE16(_value) \
	IXGBE_NTOHS(((u16)(_value) >> 8) | ((u16)(_value) << 8))
1720
/**
 * ixgbe_fdir_set_input_mask_82599 - Program the Flow Director mask registers
 * @hw: pointer to hardware structure
 * @input_mask: mask describing which filter fields are compared
 * @cloud_mode: true when programming cloud (tunnel) filter masks
 *
 * Validates the partial masks and programs FDIRM, FDIRTCPM/FDIRUDPM,
 * FDIRSIP4M/FDIRDIP4M and (for cloud mode) FDIRIP6M.  Returns
 * IXGBE_SUCCESS or IXGBE_ERR_CONFIG when a mask value is unsupported.
 **/
s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
				    union ixgbe_atr_input *input_mask, bool cloud_mode)
{
	/* mask IPv6 since it is currently not supported */
	u32 fdirm = IXGBE_FDIRM_DIPv6;
	u32 fdirtcpm;
	u32 fdirip6m;
	/* NOTE(review): cloud_mode IS referenced below (cloud branch and
	 * !cloud_mode branch), so this suppression looks stale.  Also the
	 * DEBUGFUNC string says "set_atr_input_mask" while the function is
	 * "set_input_mask"; both left untouched here (runtime strings). */
	UNREFERENCED_1PARAMETER(cloud_mode);
	DEBUGFUNC("ixgbe_fdir_set_atr_input_mask_82599");

	/*
	 * Program the relevant mask registers.  If src/dst_port or src/dst_addr
	 * are zero, then assume a full mask for that field.  Also assume that
	 * a VLAN of 0 is unspecified, so mask that out as well.  L4type
	 * cannot be masked out in this implementation.
	 *
	 * This also assumes IPv4 only.  IPv6 masking isn't supported at this
	 * point in time.
	 */

	/* verify bucket hash is cleared on hash generation */
	if (input_mask->formatted.bkt_hash)
		DEBUGOUT(" bucket hash should always be 0 in mask\n");

	/* Program FDIRM and verify partial masks */
	switch (input_mask->formatted.vm_pool & 0x7F) {
	case 0x0:
		fdirm |= IXGBE_FDIRM_POOL;
		/* fall through */
	case 0x7F:
		break;
	default:
		DEBUGOUT(" Error on vm pool mask\n");
		return IXGBE_ERR_CONFIG;
	}

	switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) {
	case 0x0:
		fdirm |= IXGBE_FDIRM_L4P;
		/* masking L4 type requires the port masks be zero too */
		if (input_mask->formatted.dst_port ||
		    input_mask->formatted.src_port) {
			DEBUGOUT(" Error on src/dst port mask\n");
			return IXGBE_ERR_CONFIG;
		}
		/* fall through */
	case IXGBE_ATR_L4TYPE_MASK:
		break;
	default:
		DEBUGOUT(" Error on flow type mask\n");
		return IXGBE_ERR_CONFIG;
	}

	switch (IXGBE_NTOHS(input_mask->formatted.vlan_id) & 0xEFFF) {
	case 0x0000:
		/* mask VLAN ID */
		fdirm |= IXGBE_FDIRM_VLANID;
		/* mask VLAN priority */
		fdirm |= IXGBE_FDIRM_VLANP;
		break;
	case 0x0FFF:
		/* mask VLAN priority */
		fdirm |= IXGBE_FDIRM_VLANP;
		break;
	case 0xE000:
		/* mask VLAN ID only */
		fdirm |= IXGBE_FDIRM_VLANID;
		/* fall through */
	case 0xEFFF:
		/* no VLAN fields masked */
		break;
	default:
		DEBUGOUT(" Error on VLAN mask\n");
		return IXGBE_ERR_CONFIG;
	}

	switch (input_mask->formatted.flex_bytes & 0xFFFF) {
	case 0x0000:
		/* Mask Flex Bytes */
		fdirm |= IXGBE_FDIRM_FLEX;
		/* fall through */
	case 0xFFFF:
		break;
	default:
		DEBUGOUT(" Error on flexible byte mask\n");
		return IXGBE_ERR_CONFIG;
	}

	if (cloud_mode) {
		fdirm |= IXGBE_FDIRM_L3P;
		fdirip6m = ((u32) 0xFFFFU << IXGBE_FDIRIP6M_DIPM_SHIFT);
		fdirip6m |= IXGBE_FDIRIP6M_ALWAYS_MASK;

		switch (input_mask->formatted.inner_mac[0] & 0xFF) {
		case 0x00:
			/* Mask inner MAC */
			fdirip6m |= IXGBE_FDIRIP6M_INNER_MAC;
			/* fall through */
		case 0xFF:
			break;
		default:
			DEBUGOUT(" Error on inner_mac byte mask\n");
			return IXGBE_ERR_CONFIG;
		}

		switch (input_mask->formatted.tni_vni & 0xFFFFFFFF) {
		case 0x0:
			/* Mask vxlan id */
			fdirip6m |= IXGBE_FDIRIP6M_TNI_VNI;
			break;
		case 0x00FFFFFF:
			fdirip6m |= IXGBE_FDIRIP6M_TNI_VNI_24;
			break;
		case 0xFFFFFFFF:
			break;
		default:
			DEBUGOUT(" Error on TNI/VNI byte mask\n");
			return IXGBE_ERR_CONFIG;
		}

		switch (input_mask->formatted.tunnel_type & 0xFFFF) {
		case 0x0:
			/* Mask tunnel type */
			fdirip6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE;
			/* fall through */
		case 0xFFFF:
			break;
		default:
			DEBUGOUT(" Error on tunnel type byte mask\n");
			return IXGBE_ERR_CONFIG;
		}
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIP6M, fdirip6m);

		/* Set all bits in FDIRTCPM, FDIRUDPM, FDIRSCTPM,
		 * FDIRSIP4M and FDIRDIP4M in cloud mode to allow
		 * L3/L3 packets to tunnel.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xFFFFFFFF);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xFFFFFFFF);
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M, 0xFFFFFFFF);
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M, 0xFFFFFFFF);
		switch (hw->mac.type) {
		case ixgbe_mac_X550:
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_X550EM_a:
			IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, 0xFFFFFFFF);
			break;
		default:
			break;
		}
	}

	/* Now mask VM pool and destination IPv6 - bits 5 and 2 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);

	if (!cloud_mode) {
		/* store the TCP/UDP port masks, bit reversed from port
		 * layout */
		fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask);

		/* write both the same so that UDP and TCP use the same mask */
		IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
		/* also use it for SCTP */
		switch (hw->mac.type) {
		case ixgbe_mac_X550:
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_X550EM_a:
			IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
			break;
		default:
			break;
		}

		/* store source and destination IP masks (big-endian) */
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
				     ~input_mask->formatted.src_ip[0]);
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
				     ~input_mask->formatted.dst_ip[0]);
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIP6M, 0xFFFFFFFF);
	}
	return IXGBE_SUCCESS;
}
1899
/**
 * ixgbe_fdir_write_perfect_filter_82599 - Write a perfect filter into HW
 * @hw: pointer to hardware structure
 * @input: filter field values to program (already masked/hashed by caller)
 * @soft_id: software index identifying this filter
 * @queue: Rx queue index matching packets are directed to
 * @cloud_mode: true when programming a cloud/tunnel (inner MAC/VNI) filter
 *
 * Programs the flow director field registers and then issues the ADD_FLOW
 * command.  All field writes are flushed before the command is issued so
 * hardware latches a consistent filter.  Callers serialize access to the
 * shared FDIRHASH/FDIRCMD registers (see
 * ixgbe_fdir_add_perfect_filter_82599).
 *
 * Returns IXGBE_SUCCESS, or an error if the FDIRCMD command does not
 * complete.
 **/
s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_input *input,
					  u16 soft_id, u8 queue, bool cloud_mode)
{
	u32 fdirport, fdirvlan, fdirhash, fdircmd;
	u32 addr_low, addr_high;
	u32 cloud_type = 0;
	s32 err;
	UNREFERENCED_1PARAMETER(cloud_mode);

	DEBUGFUNC("ixgbe_fdir_write_perfect_filter_82599");
	if (!cloud_mode) {
		/* currently IPv6 is not supported, must be programmed with 0 */
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
				     input->formatted.src_ip[0]);
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1),
				     input->formatted.src_ip[1]);
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2),
				     input->formatted.src_ip[2]);

		/* record the source address (big-endian) */
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA,
				     input->formatted.src_ip[0]);

		/* record the first 32 bits of the destination address
		 * (big-endian) */
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA,
				     input->formatted.dst_ip[0]);

		/* record source and destination port (little-endian)*/
		fdirport = IXGBE_NTOHS(input->formatted.dst_port);
		fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
		fdirport |= (u32)IXGBE_NTOHS(input->formatted.src_port);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
	}

	/* record VLAN (little-endian) and flex_bytes(big-endian) */
	fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes);
	fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
	fdirvlan |= (u32)IXGBE_NTOHS(input->formatted.vlan_id);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);

	if (cloud_mode) {
		/* a non-zero tunnel type sets the high "cloud" marker bit */
		if (input->formatted.tunnel_type != 0)
			cloud_type = 0x80000000;

		/* pack the 6-byte inner MAC into the SIPv6 register pair:
		 * bytes 0-3 in the low word, bytes 4-5 with the marker bit
		 * in the high word */
		addr_low = ((u32)input->formatted.inner_mac[0] |
			    ((u32)input->formatted.inner_mac[1] << 8) |
			    ((u32)input->formatted.inner_mac[2] << 16) |
			    ((u32)input->formatted.inner_mac[3] << 24));
		addr_high = ((u32)input->formatted.inner_mac[4] |
			     ((u32)input->formatted.inner_mac[5] << 8));
		cloud_type |= addr_high;
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0), addr_low);
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1), cloud_type);
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2), input->formatted.tni_vni);
	}

	/* configure FDIRHASH register: bucket hash plus software index */
	fdirhash = input->formatted.bkt_hash;
	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);

	/*
	 * flush all previous writes to make certain registers are
	 * programmed prior to issuing the command
	 */
	IXGBE_WRITE_FLUSH(hw);

	/* configure FDIRCMD register */
	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
	if (queue == IXGBE_FDIR_DROP_QUEUE)
		fdircmd |= IXGBE_FDIRCMD_DROP;
	if (input->formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK)
		fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER;
	fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
	fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;

	/* issue the command and wait for hardware to acknowledge it */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
	if (err) {
		DEBUGOUT("Flow Director command did not complete!\n");
		return err;
	}

	return IXGBE_SUCCESS;
}
1989
ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw * hw,union ixgbe_atr_input * input,u16 soft_id)1990 s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
1991 union ixgbe_atr_input *input,
1992 u16 soft_id)
1993 {
1994 u32 fdirhash;
1995 u32 fdircmd;
1996 s32 err;
1997
1998 /* configure FDIRHASH register */
1999 fdirhash = input->formatted.bkt_hash;
2000 fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
2001 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
2002
2003 /* flush hash to HW */
2004 IXGBE_WRITE_FLUSH(hw);
2005
2006 /* Query if filter is present */
2007 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);
2008
2009 err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
2010 if (err) {
2011 DEBUGOUT("Flow Director command did not complete!\n");
2012 return err;
2013 }
2014
2015 /* if filter exists in hardware then remove it */
2016 if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
2017 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
2018 IXGBE_WRITE_FLUSH(hw);
2019 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
2020 IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
2021 }
2022
2023 return IXGBE_SUCCESS;
2024 }
2025
2026 /**
2027 * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
2028 * @hw: pointer to hardware structure
2029 * @input: input bitstream
2030 * @input_mask: mask for the input bitstream
2031 * @soft_id: software index for the filters
2032 * @queue: queue index to direct traffic to
2033 * @cloud_mode: unused
2034 *
2035 * Note that the caller to this function must lock before calling, since the
2036 * hardware writes must be protected from one another.
2037 **/
ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw * hw,union ixgbe_atr_input * input,union ixgbe_atr_input * input_mask,u16 soft_id,u8 queue,bool cloud_mode)2038 s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
2039 union ixgbe_atr_input *input,
2040 union ixgbe_atr_input *input_mask,
2041 u16 soft_id, u8 queue, bool cloud_mode)
2042 {
2043 s32 err = IXGBE_ERR_CONFIG;
2044 UNREFERENCED_1PARAMETER(cloud_mode);
2045
2046 DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599");
2047
2048 /*
2049 * Check flow_type formatting, and bail out before we touch the hardware
2050 * if there's a configuration issue
2051 */
2052 switch (input->formatted.flow_type) {
2053 case IXGBE_ATR_FLOW_TYPE_IPV4:
2054 case IXGBE_ATR_FLOW_TYPE_TUNNELED_IPV4:
2055 input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK;
2056 if (input->formatted.dst_port || input->formatted.src_port) {
2057 DEBUGOUT(" Error on src/dst port\n");
2058 return IXGBE_ERR_CONFIG;
2059 }
2060 break;
2061 case IXGBE_ATR_FLOW_TYPE_SCTPV4:
2062 case IXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV4:
2063 if (input->formatted.dst_port || input->formatted.src_port) {
2064 DEBUGOUT(" Error on src/dst port\n");
2065 return IXGBE_ERR_CONFIG;
2066 }
2067 input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
2068 IXGBE_ATR_L4TYPE_MASK;
2069 break;
2070 case IXGBE_ATR_FLOW_TYPE_TCPV4:
2071 case IXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4:
2072 case IXGBE_ATR_FLOW_TYPE_UDPV4:
2073 case IXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV4:
2074 input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
2075 IXGBE_ATR_L4TYPE_MASK;
2076 break;
2077 default:
2078 DEBUGOUT(" Error on flow type input\n");
2079 return err;
2080 }
2081
2082 /* program input mask into the HW */
2083 err = ixgbe_fdir_set_input_mask_82599(hw, input_mask, cloud_mode);
2084 if (err)
2085 return err;
2086
2087 /* apply mask and compute/store hash */
2088 ixgbe_atr_compute_perfect_hash_82599(input, input_mask);
2089
2090 /* program filters to filter memory */
2091 return ixgbe_fdir_write_perfect_filter_82599(hw, input,
2092 soft_id, queue, cloud_mode);
2093 }
2094
2095 /**
2096 * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
2097 * @hw: pointer to hardware structure
2098 * @reg: analog register to read
2099 * @val: read value
2100 *
2101 * Performs read operation to Omer analog register specified.
2102 **/
ixgbe_read_analog_reg8_82599(struct ixgbe_hw * hw,u32 reg,u8 * val)2103 s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
2104 {
2105 u32 core_ctl;
2106
2107 DEBUGFUNC("ixgbe_read_analog_reg8_82599");
2108
2109 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
2110 (reg << 8));
2111 IXGBE_WRITE_FLUSH(hw);
2112 usec_delay(10);
2113 core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
2114 *val = (u8)core_ctl;
2115
2116 return IXGBE_SUCCESS;
2117 }
2118
2119 /**
2120 * ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register
2121 * @hw: pointer to hardware structure
2122 * @reg: atlas register to write
2123 * @val: value to write
2124 *
2125 * Performs write operation to Omer analog register specified.
2126 **/
ixgbe_write_analog_reg8_82599(struct ixgbe_hw * hw,u32 reg,u8 val)2127 s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
2128 {
2129 u32 core_ctl;
2130
2131 DEBUGFUNC("ixgbe_write_analog_reg8_82599");
2132
2133 core_ctl = (reg << 8) | val;
2134 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl);
2135 IXGBE_WRITE_FLUSH(hw);
2136 usec_delay(10);
2137
2138 return IXGBE_SUCCESS;
2139 }
2140
2141 /**
2142 * ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx
2143 * @hw: pointer to hardware structure
2144 *
2145 * Starts the hardware using the generic start_hw function
2146 * and the generation start_hw function.
2147 * Then performs revision-specific operations, if any.
2148 **/
ixgbe_start_hw_82599(struct ixgbe_hw * hw)2149 s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
2150 {
2151 s32 ret_val = IXGBE_SUCCESS;
2152
2153 DEBUGFUNC("ixgbe_start_hw_82599");
2154
2155 ret_val = ixgbe_start_hw_generic(hw);
2156 if (ret_val != IXGBE_SUCCESS)
2157 goto out;
2158
2159 ixgbe_start_hw_gen2(hw);
2160
2161 /* We need to run link autotry after the driver loads */
2162 hw->mac.autotry_restart = true;
2163
2164 if (ret_val == IXGBE_SUCCESS)
2165 ret_val = ixgbe_verify_fw_version_82599(hw);
2166 out:
2167 return ret_val;
2168 }
2169
2170 /**
2171 * ixgbe_identify_phy_82599 - Get physical layer module
2172 * @hw: pointer to hardware structure
2173 *
2174 * Determines the physical layer module found on the current adapter.
2175 * If PHY already detected, maintains current PHY type in hw struct,
2176 * otherwise executes the PHY detection routine.
2177 **/
ixgbe_identify_phy_82599(struct ixgbe_hw * hw)2178 s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
2179 {
2180 s32 status;
2181
2182 DEBUGFUNC("ixgbe_identify_phy_82599");
2183
2184 /* Detect PHY if not unknown - returns success if already detected. */
2185 status = ixgbe_identify_phy_generic(hw);
2186 if (status != IXGBE_SUCCESS) {
2187 /* 82599 10GBASE-T requires an external PHY */
2188 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
2189 return status;
2190 else
2191 status = ixgbe_identify_module_generic(hw);
2192 }
2193
2194 /* Set PHY type none if no PHY detected */
2195 if (hw->phy.type == ixgbe_phy_unknown) {
2196 hw->phy.type = ixgbe_phy_none;
2197 return IXGBE_SUCCESS;
2198 }
2199
2200 /* Return error if SFP module has been detected but is not supported */
2201 if (hw->phy.type == ixgbe_phy_sfp_unsupported)
2202 return IXGBE_ERR_SFP_NOT_SUPPORTED;
2203
2204 return status;
2205 }
2206
2207 /**
2208 * ixgbe_get_supported_physical_layer_82599 - Returns physical layer type
2209 * @hw: pointer to hardware structure
2210 *
2211 * Determines physical layer capabilities of the current configuration.
2212 **/
ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw * hw)2213 u64 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
2214 {
2215 u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
2216 u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2217 u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
2218 u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
2219 u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
2220 u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
2221 u16 ext_ability = 0;
2222
2223 DEBUGFUNC("ixgbe_get_support_physical_layer_82599");
2224
2225 hw->phy.ops.identify(hw);
2226
2227 switch (hw->phy.type) {
2228 case ixgbe_phy_tn:
2229 case ixgbe_phy_cu_unknown:
2230 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
2231 IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
2232 if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
2233 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
2234 if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
2235 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
2236 if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
2237 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
2238 goto out;
2239 default:
2240 break;
2241 }
2242
2243 switch (autoc & IXGBE_AUTOC_LMS_MASK) {
2244 case IXGBE_AUTOC_LMS_1G_AN:
2245 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
2246 if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) {
2247 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX |
2248 IXGBE_PHYSICAL_LAYER_1000BASE_BX;
2249 goto out;
2250 } else
2251 /* SFI mode so read SFP module */
2252 goto sfp_check;
2253 break;
2254 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
2255 if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4)
2256 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
2257 else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4)
2258 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
2259 else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI)
2260 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI;
2261 goto out;
2262 break;
2263 case IXGBE_AUTOC_LMS_10G_SERIAL:
2264 if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) {
2265 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR;
2266 goto out;
2267 } else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)
2268 goto sfp_check;
2269 break;
2270 case IXGBE_AUTOC_LMS_KX4_KX_KR:
2271 case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
2272 if (autoc & IXGBE_AUTOC_KX_SUPP)
2273 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
2274 if (autoc & IXGBE_AUTOC_KX4_SUPP)
2275 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
2276 if (autoc & IXGBE_AUTOC_KR_SUPP)
2277 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
2278 goto out;
2279 break;
2280 default:
2281 goto out;
2282 break;
2283 }
2284
2285 sfp_check:
2286 /* SFP check must be done last since DA modules are sometimes used to
2287 * test KR mode - we need to id KR mode correctly before SFP module.
2288 * Call identify_sfp because the pluggable module may have changed */
2289 physical_layer = ixgbe_get_supported_phy_sfp_layer_generic(hw);
2290 out:
2291 return physical_layer;
2292 }
2293
2294 /**
2295 * ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599
2296 * @hw: pointer to hardware structure
2297 * @regval: register value to write to RXCTRL
2298 *
2299 * Enables the Rx DMA unit for 82599
2300 **/
ixgbe_enable_rx_dma_82599(struct ixgbe_hw * hw,u32 regval)2301 s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
2302 {
2303
2304 DEBUGFUNC("ixgbe_enable_rx_dma_82599");
2305
2306 /*
2307 * Workaround for 82599 silicon errata when enabling the Rx datapath.
2308 * If traffic is incoming before we enable the Rx unit, it could hang
2309 * the Rx DMA unit. Therefore, make sure the security engine is
2310 * completely disabled prior to enabling the Rx unit.
2311 */
2312
2313 hw->mac.ops.disable_sec_rx_path(hw);
2314
2315 if (regval & IXGBE_RXCTRL_RXEN)
2316 ixgbe_enable_rx(hw);
2317 else
2318 ixgbe_disable_rx(hw);
2319
2320 hw->mac.ops.enable_sec_rx_path(hw);
2321
2322 return IXGBE_SUCCESS;
2323 }
2324
2325 /**
2326 * ixgbe_verify_fw_version_82599 - verify FW version for 82599
2327 * @hw: pointer to hardware structure
2328 *
2329 * Verifies that installed the firmware version is 0.6 or higher
2330 * for SFI devices. All 82599 SFI devices should have version 0.6 or higher.
2331 *
2332 * Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or
2333 * if the FW version is not supported.
2334 **/
ixgbe_verify_fw_version_82599(struct ixgbe_hw * hw)2335 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
2336 {
2337 s32 status = IXGBE_ERR_EEPROM_VERSION;
2338 u16 fw_offset, fw_ptp_cfg_offset;
2339 u16 fw_version;
2340
2341 DEBUGFUNC("ixgbe_verify_fw_version_82599");
2342
2343 /* firmware check is only necessary for SFI devices */
2344 if (hw->phy.media_type != ixgbe_media_type_fiber) {
2345 status = IXGBE_SUCCESS;
2346 goto fw_version_out;
2347 }
2348
2349 /* get the offset to the Firmware Module block */
2350 if (hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset)) {
2351 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
2352 "eeprom read at offset %d failed", IXGBE_FW_PTR);
2353 return IXGBE_ERR_EEPROM_VERSION;
2354 }
2355
2356 if ((fw_offset == 0) || (fw_offset == 0xFFFF))
2357 goto fw_version_out;
2358
2359 /* get the offset to the Pass Through Patch Configuration block */
2360 if (hw->eeprom.ops.read(hw, (fw_offset +
2361 IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR),
2362 &fw_ptp_cfg_offset)) {
2363 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
2364 "eeprom read at offset %d failed",
2365 fw_offset +
2366 IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR);
2367 return IXGBE_ERR_EEPROM_VERSION;
2368 }
2369
2370 if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF))
2371 goto fw_version_out;
2372
2373 /* get the firmware version */
2374 if (hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset +
2375 IXGBE_FW_PATCH_VERSION_4), &fw_version)) {
2376 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
2377 "eeprom read at offset %d failed",
2378 fw_ptp_cfg_offset + IXGBE_FW_PATCH_VERSION_4);
2379 return IXGBE_ERR_EEPROM_VERSION;
2380 }
2381
2382 if (fw_version > 0x5)
2383 status = IXGBE_SUCCESS;
2384
2385 fw_version_out:
2386 return status;
2387 }
2388
2389 /**
2390 * ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state.
2391 * @hw: pointer to hardware structure
2392 *
2393 * Returns true if the LESM FW module is present and enabled. Otherwise
2394 * returns false. Smart Speed must be disabled if LESM FW module is enabled.
2395 **/
ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw * hw)2396 bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
2397 {
2398 bool lesm_enabled = false;
2399 u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
2400 s32 status;
2401
2402 DEBUGFUNC("ixgbe_verify_lesm_fw_enabled_82599");
2403
2404 /* get the offset to the Firmware Module block */
2405 status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
2406
2407 if ((status != IXGBE_SUCCESS) ||
2408 (fw_offset == 0) || (fw_offset == 0xFFFF))
2409 goto out;
2410
2411 /* get the offset to the LESM Parameters block */
2412 status = hw->eeprom.ops.read(hw, (fw_offset +
2413 IXGBE_FW_LESM_PARAMETERS_PTR),
2414 &fw_lesm_param_offset);
2415
2416 if ((status != IXGBE_SUCCESS) ||
2417 (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF))
2418 goto out;
2419
2420 /* get the LESM state word */
2421 status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset +
2422 IXGBE_FW_LESM_STATE_1),
2423 &fw_lesm_state);
2424
2425 if ((status == IXGBE_SUCCESS) &&
2426 (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED))
2427 lesm_enabled = true;
2428
2429 out:
2430 return lesm_enabled;
2431 }
2432
2433 /**
2434 * ixgbe_read_eeprom_buffer_82599 - Read EEPROM word(s) using
2435 * fastest available method
2436 *
2437 * @hw: pointer to hardware structure
2438 * @offset: offset of word in EEPROM to read
2439 * @words: number of words
2440 * @data: word(s) read from the EEPROM
2441 *
2442 * Retrieves 16 bit word(s) read from EEPROM
2443 **/
ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw * hw,u16 offset,u16 words,u16 * data)2444 static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
2445 u16 words, u16 *data)
2446 {
2447 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
2448 s32 ret_val = IXGBE_ERR_CONFIG;
2449
2450 DEBUGFUNC("ixgbe_read_eeprom_buffer_82599");
2451
2452 /*
2453 * If EEPROM is detected and can be addressed using 14 bits,
2454 * use EERD otherwise use bit bang
2455 */
2456 if ((eeprom->type == ixgbe_eeprom_spi) &&
2457 (offset + (words - 1) <= IXGBE_EERD_MAX_ADDR))
2458 ret_val = ixgbe_read_eerd_buffer_generic(hw, offset, words,
2459 data);
2460 else
2461 ret_val = ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset,
2462 words,
2463 data);
2464
2465 return ret_val;
2466 }
2467
2468 /**
2469 * ixgbe_read_eeprom_82599 - Read EEPROM word using
2470 * fastest available method
2471 *
2472 * @hw: pointer to hardware structure
2473 * @offset: offset of word in the EEPROM to read
2474 * @data: word read from the EEPROM
2475 *
2476 * Reads a 16 bit word from the EEPROM
2477 **/
ixgbe_read_eeprom_82599(struct ixgbe_hw * hw,u16 offset,u16 * data)2478 static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
2479 u16 offset, u16 *data)
2480 {
2481 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
2482 s32 ret_val = IXGBE_ERR_CONFIG;
2483
2484 DEBUGFUNC("ixgbe_read_eeprom_82599");
2485
2486 /*
2487 * If EEPROM is detected and can be addressed using 14 bits,
2488 * use EERD otherwise use bit bang
2489 */
2490 if ((eeprom->type == ixgbe_eeprom_spi) &&
2491 (offset <= IXGBE_EERD_MAX_ADDR))
2492 ret_val = ixgbe_read_eerd_generic(hw, offset, data);
2493 else
2494 ret_val = ixgbe_read_eeprom_bit_bang_generic(hw, offset, data);
2495
2496 return ret_val;
2497 }
2498
2499 /**
2500 * ixgbe_reset_pipeline_82599 - perform pipeline reset
2501 *
2502 * @hw: pointer to hardware structure
2503 *
2504 * Reset pipeline by asserting Restart_AN together with LMS change to ensure
2505 * full pipeline reset. This function assumes the SW/FW lock is held.
2506 **/
ixgbe_reset_pipeline_82599(struct ixgbe_hw * hw)2507 s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
2508 {
2509 s32 ret_val;
2510 u32 anlp1_reg = 0;
2511 u32 i, autoc_reg, autoc2_reg;
2512
2513 /* Enable link if disabled in NVM */
2514 autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
2515 if (autoc2_reg & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
2516 autoc2_reg &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
2517 IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
2518 IXGBE_WRITE_FLUSH(hw);
2519 }
2520
2521 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2522 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
2523 /* Write AUTOC register with toggled LMS[2] bit and Restart_AN */
2524 IXGBE_WRITE_REG(hw, IXGBE_AUTOC,
2525 autoc_reg ^ (0x4 << IXGBE_AUTOC_LMS_SHIFT));
2526 /* Wait for AN to leave state 0 */
2527 for (i = 0; i < 10; i++) {
2528 msec_delay(4);
2529 anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
2530 if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)
2531 break;
2532 }
2533
2534 if (!(anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)) {
2535 DEBUGOUT("auto negotiation not completed\n");
2536 ret_val = IXGBE_ERR_RESET_FAILED;
2537 goto reset_pipeline_out;
2538 }
2539
2540 ret_val = IXGBE_SUCCESS;
2541
2542 reset_pipeline_out:
2543 /* Write AUTOC register with original LMS field and Restart_AN */
2544 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
2545 IXGBE_WRITE_FLUSH(hw);
2546
2547 return ret_val;
2548 }
2549
2550 /**
2551 * ixgbe_read_i2c_byte_82599 - Reads 8 bit word over I2C
2552 * @hw: pointer to hardware structure
2553 * @byte_offset: byte offset to read
2554 * @dev_addr: address to read from
2555 * @data: value read
2556 *
2557 * Performs byte read operation to SFP module's EEPROM over I2C interface at
2558 * a specified device address.
2559 **/
ixgbe_read_i2c_byte_82599(struct ixgbe_hw * hw,u8 byte_offset,u8 dev_addr,u8 * data)2560 static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
2561 u8 dev_addr, u8 *data)
2562 {
2563 u32 esdp;
2564 s32 status;
2565 s32 timeout = 200;
2566
2567 DEBUGFUNC("ixgbe_read_i2c_byte_82599");
2568
2569 if (hw->phy.qsfp_shared_i2c_bus == true) {
2570 /* Acquire I2C bus ownership. */
2571 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2572 esdp |= IXGBE_ESDP_SDP0;
2573 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
2574 IXGBE_WRITE_FLUSH(hw);
2575
2576 while (timeout) {
2577 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2578 if (esdp & IXGBE_ESDP_SDP1)
2579 break;
2580
2581 msec_delay(5);
2582 timeout--;
2583 }
2584
2585 if (!timeout) {
2586 DEBUGOUT("Driver can't access resource,"
2587 " acquiring I2C bus timeout.\n");
2588 status = IXGBE_ERR_I2C;
2589 goto release_i2c_access;
2590 }
2591 }
2592
2593 status = ixgbe_read_i2c_byte_generic(hw, byte_offset, dev_addr, data);
2594
2595 release_i2c_access:
2596
2597 if (hw->phy.qsfp_shared_i2c_bus == true) {
2598 /* Release I2C bus ownership. */
2599 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2600 esdp &= ~IXGBE_ESDP_SDP0;
2601 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
2602 IXGBE_WRITE_FLUSH(hw);
2603 }
2604
2605 return status;
2606 }
2607
2608 /**
2609 * ixgbe_write_i2c_byte_82599 - Writes 8 bit word over I2C
2610 * @hw: pointer to hardware structure
2611 * @byte_offset: byte offset to write
2612 * @dev_addr: address to read from
2613 * @data: value to write
2614 *
2615 * Performs byte write operation to SFP module's EEPROM over I2C interface at
2616 * a specified device address.
2617 **/
ixgbe_write_i2c_byte_82599(struct ixgbe_hw * hw,u8 byte_offset,u8 dev_addr,u8 data)2618 static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
2619 u8 dev_addr, u8 data)
2620 {
2621 u32 esdp;
2622 s32 status;
2623 s32 timeout = 200;
2624
2625 DEBUGFUNC("ixgbe_write_i2c_byte_82599");
2626
2627 if (hw->phy.qsfp_shared_i2c_bus == true) {
2628 /* Acquire I2C bus ownership. */
2629 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2630 esdp |= IXGBE_ESDP_SDP0;
2631 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
2632 IXGBE_WRITE_FLUSH(hw);
2633
2634 while (timeout) {
2635 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2636 if (esdp & IXGBE_ESDP_SDP1)
2637 break;
2638
2639 msec_delay(5);
2640 timeout--;
2641 }
2642
2643 if (!timeout) {
2644 DEBUGOUT("Driver can't access resource,"
2645 " acquiring I2C bus timeout.\n");
2646 status = IXGBE_ERR_I2C;
2647 goto release_i2c_access;
2648 }
2649 }
2650
2651 status = ixgbe_write_i2c_byte_generic(hw, byte_offset, dev_addr, data);
2652
2653 release_i2c_access:
2654
2655 if (hw->phy.qsfp_shared_i2c_bus == true) {
2656 /* Release I2C bus ownership. */
2657 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2658 esdp &= ~IXGBE_ESDP_SDP0;
2659 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
2660 IXGBE_WRITE_FLUSH(hw);
2661 }
2662
2663 return status;
2664 }
2665