1 /******************************************************************************
2
3 Copyright (c) 2001-2020, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33
34 #include "ixgbe_x550.h"
35 #include "ixgbe_x540.h"
36 #include "ixgbe_type.h"
37 #include "ixgbe_api.h"
38 #include "ixgbe_common.h"
39 #include "ixgbe_phy.h"
40
41 static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed);
42 static s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *, u32 mask);
43 static void ixgbe_release_swfw_sync_X550a(struct ixgbe_hw *, u32 mask);
44 static s32 ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw);
45
46 /**
47 * ixgbe_init_ops_X550 - Inits func ptrs and MAC type
48 * @hw: pointer to hardware structure
49 *
50 * Initialize the function pointers and assign the MAC type for X550.
51 * Does not touch the hardware.
52 **/
53 s32 ixgbe_init_ops_X550(struct ixgbe_hw *hw)
54 {
55 struct ixgbe_mac_info *mac = &hw->mac;
56 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
57 s32 ret_val;
58
59 DEBUGFUNC("ixgbe_init_ops_X550");
60
61 ret_val = ixgbe_init_ops_X540(hw);
62 mac->ops.dmac_config = ixgbe_dmac_config_X550;
63 mac->ops.dmac_config_tcs = ixgbe_dmac_config_tcs_X550;
64 mac->ops.dmac_update_tcs = ixgbe_dmac_update_tcs_X550;
65 mac->ops.setup_eee = NULL;
66 mac->ops.set_source_address_pruning =
67 ixgbe_set_source_address_pruning_X550;
68 mac->ops.set_ethertype_anti_spoofing =
69 ixgbe_set_ethertype_anti_spoofing_X550;
70
71 mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic;
72 eeprom->ops.init_params = ixgbe_init_eeprom_params_X550;
73 eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X550;
74 eeprom->ops.read = ixgbe_read_ee_hostif_X550;
75 eeprom->ops.read_buffer = ixgbe_read_ee_hostif_buffer_X550;
76 eeprom->ops.write = ixgbe_write_ee_hostif_X550;
77 eeprom->ops.write_buffer = ixgbe_write_ee_hostif_buffer_X550;
78 eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X550;
79 eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X550;
80
81 mac->ops.disable_mdd = ixgbe_disable_mdd_X550;
82 mac->ops.enable_mdd = ixgbe_enable_mdd_X550;
83 mac->ops.mdd_event = ixgbe_mdd_event_X550;
84 mac->ops.restore_mdd_vf = ixgbe_restore_mdd_vf_X550;
85 mac->ops.fw_recovery_mode = ixgbe_fw_recovery_mode_X550;
86 mac->ops.disable_rx = ixgbe_disable_rx_x550;
87 /* Manageability interface */
88 mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_x550;
89 switch (hw->device_id) {
90 case IXGBE_DEV_ID_X550EM_X_1G_T:
91 hw->mac.ops.led_on = NULL;
92 hw->mac.ops.led_off = NULL;
93 break;
94 case IXGBE_DEV_ID_X550EM_X_10G_T:
95 case IXGBE_DEV_ID_X550EM_A_10G_T:
96 hw->mac.ops.led_on = ixgbe_led_on_t_X550em;
97 hw->mac.ops.led_off = ixgbe_led_off_t_X550em;
98 break;
99 default:
100 break;
101 }
102 return ret_val;
103 }
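/*
 * Usage sketch (illustrative, not part of the upstream file): once the ops
 * tables above are populated, callers reach the X550-specific behaviour
 * through the function pointers rather than by name.  The watchdog value is a
 * made-up example and a real caller would check every return code.
 *
 *	s32 err = ixgbe_init_ops_X550(hw);
 *
 *	if (err != IXGBE_SUCCESS)
 *		return err;
 *	hw->mac.dmac_config.watchdog_timer = 0;     (0 leaves DMAC disabled)
 *	err = hw->mac.ops.dmac_config(hw);          (ixgbe_dmac_config_X550)
 */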
104
105 /**
106 * ixgbe_read_cs4227 - Read CS4227 register
107 * @hw: pointer to hardware structure
108 * @reg: register number to read
109 * @value: pointer to receive value read
110 *
111 * Returns status code
112 **/
113 static s32 ixgbe_read_cs4227(struct ixgbe_hw *hw, u16 reg, u16 *value)
114 {
115 return hw->link.ops.read_link_unlocked(hw, hw->link.addr, reg, value);
116 }
117
118 /**
119 * ixgbe_write_cs4227 - Write CS4227 register
120 * @hw: pointer to hardware structure
121 * @reg: register number to write
122 * @value: value to write to register
123 *
124 * Returns status code
125 **/
126 static s32 ixgbe_write_cs4227(struct ixgbe_hw *hw, u16 reg, u16 value)
127 {
128 return hw->link.ops.write_link_unlocked(hw, hw->link.addr, reg, value);
129 }
130
131 /**
132 * ixgbe_read_pe - Read register from port expander
133 * @hw: pointer to hardware structure
134 * @reg: register number to read
135 * @value: pointer to receive read value
136 *
137 * Returns status code
138 **/
139 static s32 ixgbe_read_pe(struct ixgbe_hw *hw, u8 reg, u8 *value)
140 {
141 s32 status;
142
143 status = ixgbe_read_i2c_byte_unlocked(hw, reg, IXGBE_PE, value);
144 if (status != IXGBE_SUCCESS)
145 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
146 "port expander access failed with %d\n", status);
147 return status;
148 }
149
150 /**
151 * ixgbe_write_pe - Write register to port expander
152 * @hw: pointer to hardware structure
153 * @reg: register number to write
154 * @value: value to write
155 *
156 * Returns status code
157 **/
158 static s32 ixgbe_write_pe(struct ixgbe_hw *hw, u8 reg, u8 value)
159 {
160 s32 status;
161
162 status = ixgbe_write_i2c_byte_unlocked(hw, reg, IXGBE_PE, value);
163 if (status != IXGBE_SUCCESS)
164 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
165 "port expander access failed with %d\n", status);
166 return status;
167 }
168
169 /**
170 * ixgbe_reset_cs4227 - Reset CS4227 using port expander
171 * @hw: pointer to hardware structure
172 *
173 * This function assumes that the caller has acquired the proper semaphore.
174 * Returns error code
175 **/
176 static s32 ixgbe_reset_cs4227(struct ixgbe_hw *hw)
177 {
178 s32 status;
179 u32 retry;
180 u16 value;
181 u8 reg;
182
183 /* Trigger hard reset. */
184 status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
185 if (status != IXGBE_SUCCESS)
186 return status;
187 reg |= IXGBE_PE_BIT1;
188 status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
189 if (status != IXGBE_SUCCESS)
190 return status;
191
192 status = ixgbe_read_pe(hw, IXGBE_PE_CONFIG, &reg);
193 if (status != IXGBE_SUCCESS)
194 return status;
195 reg &= ~IXGBE_PE_BIT1;
196 status = ixgbe_write_pe(hw, IXGBE_PE_CONFIG, reg);
197 if (status != IXGBE_SUCCESS)
198 return status;
199
200 status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
201 if (status != IXGBE_SUCCESS)
202 return status;
203 reg &= ~IXGBE_PE_BIT1;
204 status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
205 if (status != IXGBE_SUCCESS)
206 return status;
207
208 usec_delay(IXGBE_CS4227_RESET_HOLD);
209
210 status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
211 if (status != IXGBE_SUCCESS)
212 return status;
213 reg |= IXGBE_PE_BIT1;
214 status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
215 if (status != IXGBE_SUCCESS)
216 return status;
217
218 /* Wait for the reset to complete. */
219 msec_delay(IXGBE_CS4227_RESET_DELAY);
220 for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) {
221 status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EFUSE_STATUS,
222 &value);
223 if (status == IXGBE_SUCCESS &&
224 value == IXGBE_CS4227_EEPROM_LOAD_OK)
225 break;
226 msec_delay(IXGBE_CS4227_CHECK_DELAY);
227 }
228 if (retry == IXGBE_CS4227_RETRIES) {
229 ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
230 "CS4227 reset did not complete.");
231 return IXGBE_ERR_PHY;
232 }
233
234 status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EEPROM_STATUS, &value);
235 if (status != IXGBE_SUCCESS ||
236 !(value & IXGBE_CS4227_EEPROM_LOAD_OK)) {
237 ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
238 "CS4227 EEPROM did not load successfully.");
239 return IXGBE_ERR_PHY;
240 }
241
242 return IXGBE_SUCCESS;
243 }
244
245 /**
246 * ixgbe_check_cs4227 - Check CS4227 and reset as needed
247 * @hw: pointer to hardware structure
248 **/
249 static void ixgbe_check_cs4227(struct ixgbe_hw *hw)
250 {
251 s32 status = IXGBE_SUCCESS;
252 u32 swfw_mask = hw->phy.phy_semaphore_mask;
253 u16 value = 0;
254 u8 retry;
255
256 for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) {
257 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
258 if (status != IXGBE_SUCCESS) {
259 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
260 "semaphore failed with %d", status);
261 msec_delay(IXGBE_CS4227_CHECK_DELAY);
262 continue;
263 }
264
265 /* Get status of reset flow. */
266 status = ixgbe_read_cs4227(hw, IXGBE_CS4227_SCRATCH, &value);
267
268 if (status == IXGBE_SUCCESS &&
269 value == IXGBE_CS4227_RESET_COMPLETE)
270 goto out;
271
272 if (status != IXGBE_SUCCESS ||
273 value != IXGBE_CS4227_RESET_PENDING)
274 break;
275
276 /* Reset is pending. Wait and check again. */
277 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
278 msec_delay(IXGBE_CS4227_CHECK_DELAY);
279 }
280
281 /* If still pending, assume other instance failed. */
282 if (retry == IXGBE_CS4227_RETRIES) {
283 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
284 if (status != IXGBE_SUCCESS) {
285 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
286 "semaphore failed with %d", status);
287 return;
288 }
289 }
290
291 /* Reset the CS4227. */
292 status = ixgbe_reset_cs4227(hw);
293 if (status != IXGBE_SUCCESS) {
294 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
295 "CS4227 reset failed: %d", status);
296 goto out;
297 }
298
299 /* Reset takes so long, temporarily release semaphore in case the
300 * other driver instance is waiting for the reset indication.
301 */
302 ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH,
303 IXGBE_CS4227_RESET_PENDING);
304 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
305 msec_delay(10);
306 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
307 if (status != IXGBE_SUCCESS) {
308 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
309 "semaphore failed with %d", status);
310 return;
311 }
312
313 /* Record completion for next time. */
314 status = ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH,
315 IXGBE_CS4227_RESET_COMPLETE);
316
317 out:
318 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
319 msec_delay(hw->eeprom.semaphore_delay);
320 }
321
322 /**
323 * ixgbe_setup_mux_ctl - Setup ESDP register for I2C mux control
324 * @hw: pointer to hardware structure
325 **/
326 static void ixgbe_setup_mux_ctl(struct ixgbe_hw *hw)
327 {
328 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
329
330 if (hw->bus.lan_id) {
331 esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1);
332 esdp |= IXGBE_ESDP_SDP1_DIR;
333 }
334 esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR);
335 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
336 IXGBE_WRITE_FLUSH(hw);
337 }
338
339 /**
340 * ixgbe_identify_phy_x550em - Get PHY type based on device id
341 * @hw: pointer to hardware structure
342 *
343 * Returns error code
344 */
345 static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
346 {
347 hw->mac.ops.set_lan_id(hw);
348
349 ixgbe_read_mng_if_sel_x550em(hw);
350
351 switch (hw->device_id) {
352 case IXGBE_DEV_ID_X550EM_A_SFP:
353 return ixgbe_identify_sfp_module_X550em(hw);
354 case IXGBE_DEV_ID_X550EM_X_SFP:
355 /* set up for CS4227 usage */
356 ixgbe_setup_mux_ctl(hw);
357 ixgbe_check_cs4227(hw);
358 return ixgbe_identify_sfp_module_X550em(hw);
359 case IXGBE_DEV_ID_X550EM_A_SFP_N:
360 return ixgbe_identify_sfp_module_X550em(hw);
361 break;
362 case IXGBE_DEV_ID_X550EM_X_KX4:
363 hw->phy.type = ixgbe_phy_x550em_kx4;
364 break;
365 case IXGBE_DEV_ID_X550EM_X_XFI:
366 hw->phy.type = ixgbe_phy_x550em_xfi;
367 break;
368 case IXGBE_DEV_ID_X550EM_X_KR:
369 case IXGBE_DEV_ID_X550EM_A_KR:
370 case IXGBE_DEV_ID_X550EM_A_KR_L:
371 hw->phy.type = ixgbe_phy_x550em_kr;
372 break;
373 case IXGBE_DEV_ID_X550EM_A_10G_T:
374 case IXGBE_DEV_ID_X550EM_X_10G_T:
375 return ixgbe_identify_phy_generic(hw);
376 case IXGBE_DEV_ID_X550EM_X_1G_T:
377 hw->phy.type = ixgbe_phy_ext_1g_t;
378 break;
379 case IXGBE_DEV_ID_X550EM_A_1G_T:
380 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
381 hw->phy.type = ixgbe_phy_fw;
382 if (hw->bus.lan_id)
383 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
384 else
385 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM;
386 break;
387 default:
388 break;
389 }
390 return IXGBE_SUCCESS;
391 }
392
393 /**
394 * ixgbe_fw_phy_activity - Perform an activity on a PHY
395 * @hw: pointer to hardware structure
396 * @activity: activity to perform
397 * @data: Pointer to 4 32-bit words of data
398 */
399 s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity,
400 u32 (*data)[FW_PHY_ACT_DATA_COUNT])
401 {
402 union {
403 struct ixgbe_hic_phy_activity_req cmd;
404 struct ixgbe_hic_phy_activity_resp rsp;
405 } hic;
406 u16 retries = FW_PHY_ACT_RETRIES;
407 s32 rc;
408 u16 i;
409
410 do {
411 memset(&hic, 0, sizeof(hic));
412 hic.cmd.hdr.cmd = FW_PHY_ACT_REQ_CMD;
413 hic.cmd.hdr.buf_len = FW_PHY_ACT_REQ_LEN;
414 hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
415 hic.cmd.port_number = hw->bus.lan_id;
416 hic.cmd.activity_id = IXGBE_CPU_TO_LE16(activity);
417 for (i = 0; i < FW_PHY_ACT_DATA_COUNT; ++i)
418 hic.cmd.data[i] = IXGBE_CPU_TO_BE32((*data)[i]);
419
420 rc = ixgbe_host_interface_command(hw, (u32 *)&hic.cmd,
421 sizeof(hic.cmd),
422 IXGBE_HI_COMMAND_TIMEOUT,
423 true);
424 if (rc != IXGBE_SUCCESS)
425 return rc;
426 if (hic.rsp.hdr.cmd_or_resp.ret_status ==
427 FW_CEM_RESP_STATUS_SUCCESS) {
428 for (i = 0; i < FW_PHY_ACT_DATA_COUNT; ++i)
429 (*data)[i] = IXGBE_BE32_TO_CPU(hic.rsp.data[i]);
430 return IXGBE_SUCCESS;
431 }
432 usec_delay(20);
433 --retries;
434 } while (retries > 0);
435
436 return IXGBE_ERR_HOST_INTERFACE_COMMAND;
437 }
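/*
 * Usage sketch (illustrative only): callers hand ixgbe_fw_phy_activity() a
 * four-word buffer that carries the request on entry and is overwritten with
 * the firmware response on success, as ixgbe_get_phy_id_fw() and
 * ixgbe_shutdown_fw_phy() below do.  For example:
 *
 *	u32 buf[FW_PHY_ACT_DATA_COUNT] = { 0 };
 *	s32 rc;
 *
 *	buf[0] = FW_PHY_ACT_FORCE_LINK_DOWN_OFF;
 *	rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_FORCE_LINK_DOWN, &buf);
 *	if (rc == IXGBE_SUCCESS)
 *		buf[0..3] now hold the response words
 */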
438
439 static const struct {
440 u16 fw_speed;
441 ixgbe_link_speed phy_speed;
442 } ixgbe_fw_map[] = {
443 { FW_PHY_ACT_LINK_SPEED_10, IXGBE_LINK_SPEED_10_FULL },
444 { FW_PHY_ACT_LINK_SPEED_100, IXGBE_LINK_SPEED_100_FULL },
445 { FW_PHY_ACT_LINK_SPEED_1G, IXGBE_LINK_SPEED_1GB_FULL },
446 { FW_PHY_ACT_LINK_SPEED_2_5G, IXGBE_LINK_SPEED_2_5GB_FULL },
447 { FW_PHY_ACT_LINK_SPEED_5G, IXGBE_LINK_SPEED_5GB_FULL },
448 { FW_PHY_ACT_LINK_SPEED_10G, IXGBE_LINK_SPEED_10GB_FULL },
449 };
450
451 /**
452 * ixgbe_get_phy_id_fw - Get the phy ID via firmware command
453 * @hw: pointer to hardware structure
454 *
455 * Returns error code
456 */
457 static s32 ixgbe_get_phy_id_fw(struct ixgbe_hw *hw)
458 {
459 u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
460 u16 phy_speeds;
461 u16 phy_id_lo;
462 s32 rc;
463 u16 i;
464
465 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_PHY_INFO, &info);
466 if (rc)
467 return rc;
468
469 hw->phy.speeds_supported = 0;
470 phy_speeds = info[0] & FW_PHY_INFO_SPEED_MASK;
471 for (i = 0; i < sizeof(ixgbe_fw_map) / sizeof(ixgbe_fw_map[0]); ++i) {
472 if (phy_speeds & ixgbe_fw_map[i].fw_speed)
473 hw->phy.speeds_supported |= ixgbe_fw_map[i].phy_speed;
474 }
475 if (!hw->phy.autoneg_advertised)
476 hw->phy.autoneg_advertised = hw->phy.speeds_supported;
477
478 hw->phy.id = info[0] & FW_PHY_INFO_ID_HI_MASK;
479 phy_id_lo = info[1] & FW_PHY_INFO_ID_LO_MASK;
480 hw->phy.id |= phy_id_lo & IXGBE_PHY_REVISION_MASK;
481 hw->phy.revision = phy_id_lo & ~IXGBE_PHY_REVISION_MASK;
482 if (!hw->phy.id || hw->phy.id == IXGBE_PHY_REVISION_MASK)
483 return IXGBE_ERR_PHY_ADDR_INVALID;
484 return IXGBE_SUCCESS;
485 }
486
487 /**
488 * ixgbe_identify_phy_fw - Get PHY type based on firmware command
489 * @hw: pointer to hardware structure
490 *
491 * Returns error code
492 */
493 static s32 ixgbe_identify_phy_fw(struct ixgbe_hw *hw)
494 {
495 if (hw->bus.lan_id)
496 hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
497 else
498 hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
499
500 hw->phy.type = ixgbe_phy_fw;
501 hw->phy.ops.read_reg = NULL;
502 hw->phy.ops.write_reg = NULL;
503 return ixgbe_get_phy_id_fw(hw);
504 }
505
506 /**
507 * ixgbe_shutdown_fw_phy - Shutdown a firmware-controlled PHY
508 * @hw: pointer to hardware structure
509 *
510 * Returns error code
511 */
512 s32 ixgbe_shutdown_fw_phy(struct ixgbe_hw *hw)
513 {
514 u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 };
515
516 setup[0] = FW_PHY_ACT_FORCE_LINK_DOWN_OFF;
517 return ixgbe_fw_phy_activity(hw, FW_PHY_ACT_FORCE_LINK_DOWN, &setup);
518 }
519
520 static s32 ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
521 u32 device_type, u16 *phy_data)
522 {
523 UNREFERENCED_4PARAMETER(*hw, reg_addr, device_type, *phy_data);
524 return IXGBE_NOT_IMPLEMENTED;
525 }
526
527 static s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
528 u32 device_type, u16 phy_data)
529 {
530 UNREFERENCED_4PARAMETER(*hw, reg_addr, device_type, phy_data);
531 return IXGBE_NOT_IMPLEMENTED;
532 }
533
534 /**
535 * ixgbe_read_i2c_combined_generic - Perform I2C read combined operation
536 * @hw: pointer to the hardware structure
537 * @addr: I2C bus address to read from
538 * @reg: I2C device register to read from
539 * @val: pointer to location to receive read value
540 *
541 * Returns an error code on error.
542 **/
543 static s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
544 u16 reg, u16 *val)
545 {
546 return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, true);
547 }
548
549 /**
550 * ixgbe_read_i2c_combined_generic_unlocked - Do I2C read combined operation
551 * @hw: pointer to the hardware structure
552 * @addr: I2C bus address to read from
553 * @reg: I2C device register to read from
554 * @val: pointer to location to receive read value
555 *
556 * Returns an error code on error.
557 **/
558 static s32
559 ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr,
560 u16 reg, u16 *val)
561 {
562 return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, false);
563 }
564
565 /**
566 * ixgbe_write_i2c_combined_generic - Perform I2C write combined operation
567 * @hw: pointer to the hardware structure
568 * @addr: I2C bus address to write to
569 * @reg: I2C device register to write to
570 * @val: value to write
571 *
572 * Returns an error code on error.
573 **/
574 static s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw,
575 u8 addr, u16 reg, u16 val)
576 {
577 return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, true);
578 }
579
580 /**
581 * ixgbe_write_i2c_combined_generic_unlocked - Do I2C write combined operation
582 * @hw: pointer to the hardware structure
583 * @addr: I2C bus address to write to
584 * @reg: I2C device register to write to
585 * @val: value to write
586 *
587 * Returns an error code on error.
588 **/
589 static s32
590 ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw,
591 u8 addr, u16 reg, u16 val)
592 {
593 return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, false);
594 }
595
596 /**
597 * ixgbe_init_ops_X550EM - Inits func ptrs and MAC type
598 * @hw: pointer to hardware structure
599 *
600 * Initialize the function pointers for MAC type X550EM.
601 * Does not touch the hardware.
602 **/
603 s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw)
604 {
605 struct ixgbe_mac_info *mac = &hw->mac;
606 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
607 struct ixgbe_phy_info *phy = &hw->phy;
608 s32 ret_val;
609
610 DEBUGFUNC("ixgbe_init_ops_X550EM");
611
612 /* Similar to X550 so start there. */
613 ret_val = ixgbe_init_ops_X550(hw);
614
615 /* Since this function eventually calls
616 * ixgbe_init_ops_X540 by design, we are setting
617 * the pointers to NULL explicitly here to overwrite
618 * the values being set in the x540 function.
619 */
620 /* Thermal sensor not supported in x550EM */
621 mac->ops.get_thermal_sensor_data = NULL;
622 mac->ops.init_thermal_sensor_thresh = NULL;
623 mac->thermal_sensor_enabled = false;
624
625 /* Bypass not supported in x550EM */
626 mac->ops.bypass_rw = NULL;
627 mac->ops.bypass_valid_rd = NULL;
628 mac->ops.bypass_set = NULL;
629 mac->ops.bypass_rd_eep = NULL;
630
631 /* FCOE not supported in x550EM */
632 mac->ops.get_san_mac_addr = NULL;
633 mac->ops.set_san_mac_addr = NULL;
634 mac->ops.get_wwn_prefix = NULL;
635 mac->ops.get_fcoe_boot_status = NULL;
636
637 /* IPsec not supported in x550EM */
638 mac->ops.disable_sec_rx_path = NULL;
639 mac->ops.enable_sec_rx_path = NULL;
640
641 /* AUTOC register is not present in x550EM. */
642 mac->ops.prot_autoc_read = NULL;
643 mac->ops.prot_autoc_write = NULL;
644
645 /* X550EM bus type is internal*/
646 hw->bus.type = ixgbe_bus_type_internal;
647 mac->ops.get_bus_info = ixgbe_get_bus_info_X550em;
648
649
650 mac->ops.get_media_type = ixgbe_get_media_type_X550em;
651 mac->ops.setup_sfp = ixgbe_setup_sfp_modules_X550em;
652 mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_X550em;
653 mac->ops.reset_hw = ixgbe_reset_hw_X550em;
654 mac->ops.get_supported_physical_layer =
655 ixgbe_get_supported_physical_layer_X550em;
656
657 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper)
658 mac->ops.setup_fc = ixgbe_setup_fc_generic;
659 else
660 mac->ops.setup_fc = ixgbe_setup_fc_X550em;
661
662 /* PHY */
663 phy->ops.init = ixgbe_init_phy_ops_X550em;
664 switch (hw->device_id) {
665 case IXGBE_DEV_ID_X550EM_A_1G_T:
666 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
667 mac->ops.setup_fc = NULL;
668 phy->ops.identify = ixgbe_identify_phy_fw;
669 phy->ops.set_phy_power = NULL;
670 phy->ops.get_firmware_version = NULL;
671 break;
672 case IXGBE_DEV_ID_X550EM_X_1G_T:
673 mac->ops.setup_fc = NULL;
674 phy->ops.identify = ixgbe_identify_phy_x550em;
675 phy->ops.set_phy_power = NULL;
676 break;
677 default:
678 phy->ops.identify = ixgbe_identify_phy_x550em;
679 }
680
681 if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper)
682 phy->ops.set_phy_power = NULL;
683
684
685 /* EEPROM */
686 eeprom->ops.init_params = ixgbe_init_eeprom_params_X540;
687 eeprom->ops.read = ixgbe_read_ee_hostif_X550;
688 eeprom->ops.read_buffer = ixgbe_read_ee_hostif_buffer_X550;
689 eeprom->ops.write = ixgbe_write_ee_hostif_X550;
690 eeprom->ops.write_buffer = ixgbe_write_ee_hostif_buffer_X550;
691 eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X550;
692 eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X550;
693 eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X550;
694
695 return ret_val;
696 }
697
698 /**
699 * ixgbe_setup_fw_link - Setup firmware-controlled PHYs
700 * @hw: pointer to hardware structure
701 */
702 static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw)
703 {
704 u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 };
705 s32 rc;
706 u16 i;
707
708 if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
709 return 0;
710
711 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
712 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
713 "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
714 return IXGBE_ERR_INVALID_LINK_SETTINGS;
715 }
716
717 switch (hw->fc.requested_mode) {
718 case ixgbe_fc_full:
719 setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX <<
720 FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
721 break;
722 case ixgbe_fc_rx_pause:
723 setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RX <<
724 FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
725 break;
726 case ixgbe_fc_tx_pause:
727 setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_TX <<
728 FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
729 break;
730 default:
731 break;
732 }
733
734 for (i = 0; i < sizeof(ixgbe_fw_map) / sizeof(ixgbe_fw_map[0]); ++i) {
735 if (hw->phy.autoneg_advertised & ixgbe_fw_map[i].phy_speed)
736 setup[0] |= (u32)(ixgbe_fw_map[i].fw_speed);
737 }
738 setup[0] |= FW_PHY_ACT_SETUP_LINK_HP | FW_PHY_ACT_SETUP_LINK_AN;
739
740 if (hw->phy.eee_speeds_advertised)
741 setup[0] |= FW_PHY_ACT_SETUP_LINK_EEE;
742
743 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_SETUP_LINK, &setup);
744 if (rc)
745 return rc;
746 if (setup[0] == FW_PHY_ACT_SETUP_LINK_RSP_DOWN)
747 return IXGBE_ERR_OVERTEMP;
748 return IXGBE_SUCCESS;
749 }
750
751 /**
752 * ixgbe_fc_autoneg_fw - Set up flow control for FW-controlled PHYs
753 * @hw: pointer to hardware structure
754 *
755 * Called at init time to set up flow control.
756 */
757 static s32 ixgbe_fc_autoneg_fw(struct ixgbe_hw *hw)
758 {
759 if (hw->fc.requested_mode == ixgbe_fc_default)
760 hw->fc.requested_mode = ixgbe_fc_full;
761
762 return ixgbe_setup_fw_link(hw);
763 }
764
765 /**
766 * ixgbe_setup_eee_fw - Enable/disable EEE support
767 * @hw: pointer to the HW structure
768 * @enable_eee: boolean flag to enable EEE
769 *
770 * Enable/disable EEE based on enable_eee flag.
771 * This function controls EEE for firmware-based PHY implementations.
772 */
773 static s32 ixgbe_setup_eee_fw(struct ixgbe_hw *hw, bool enable_eee)
774 {
775 if (!!hw->phy.eee_speeds_advertised == enable_eee)
776 return IXGBE_SUCCESS;
777 if (enable_eee)
778 hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported;
779 else
780 hw->phy.eee_speeds_advertised = 0;
781 return hw->phy.ops.setup_link(hw);
782 }
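/*
 * Usage sketch (illustrative only): ixgbe_setup_eee_fw() is reached through
 * mac->ops.setup_eee, which ixgbe_init_ops_X550EM_a() below installs for the
 * X550EM_a 1G copper devices.  A caller disabling EEE would do roughly:
 *
 *	if (hw->mac.ops.setup_eee)
 *		status = hw->mac.ops.setup_eee(hw, false);
 */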
783
784 /**
785 * ixgbe_init_ops_X550EM_a - Inits func ptrs and MAC type
786 * @hw: pointer to hardware structure
787 *
788 * Initialize the function pointers for MAC type X550EM_a.
789 * Does not touch the hardware.
790 **/
791 s32 ixgbe_init_ops_X550EM_a(struct ixgbe_hw *hw)
792 {
793 struct ixgbe_mac_info *mac = &hw->mac;
794 s32 ret_val;
795
796 DEBUGFUNC("ixgbe_init_ops_X550EM_a");
797
798 /* Start with generic X550EM init */
799 ret_val = ixgbe_init_ops_X550EM(hw);
800
801 mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550;
802 mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550;
803 mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X550a;
804 mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X550a;
805
806 switch (mac->ops.get_media_type(hw)) {
807 case ixgbe_media_type_fiber:
808 mac->ops.setup_fc = NULL;
809 mac->ops.fc_autoneg = ixgbe_fc_autoneg_fiber_x550em_a;
810 break;
811 case ixgbe_media_type_backplane:
812 mac->ops.fc_autoneg = ixgbe_fc_autoneg_backplane_x550em_a;
813 mac->ops.setup_fc = ixgbe_setup_fc_backplane_x550em_a;
814 break;
815 default:
816 break;
817 }
818
819 switch (hw->device_id) {
820 case IXGBE_DEV_ID_X550EM_A_1G_T:
821 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
822 mac->ops.fc_autoneg = ixgbe_fc_autoneg_sgmii_x550em_a;
823 mac->ops.setup_fc = ixgbe_fc_autoneg_fw;
824 mac->ops.setup_eee = ixgbe_setup_eee_fw;
825 hw->phy.eee_speeds_supported = IXGBE_LINK_SPEED_100_FULL |
826 IXGBE_LINK_SPEED_1GB_FULL;
827 hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported;
828 break;
829 default:
830 break;
831 }
832
833 return ret_val;
834 }
835
836 /**
837 * ixgbe_init_ops_X550EM_x - Inits func ptrs and MAC type
838 * @hw: pointer to hardware structure
839 *
840 * Initialize the function pointers for MAC type X550EM_x.
841 * Does not touch the hardware.
842 **/
843 s32 ixgbe_init_ops_X550EM_x(struct ixgbe_hw *hw)
844 {
845 struct ixgbe_mac_info *mac = &hw->mac;
846 struct ixgbe_link_info *link = &hw->link;
847 s32 ret_val;
848
849 DEBUGFUNC("ixgbe_init_ops_X550EM_x");
850
851 /* Start with generic X550EM init */
852 ret_val = ixgbe_init_ops_X550EM(hw);
853
854 mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550;
855 mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550;
856 mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X550em;
857 mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X550em;
858 link->ops.read_link = ixgbe_read_i2c_combined_generic;
859 link->ops.read_link_unlocked = ixgbe_read_i2c_combined_generic_unlocked;
860 link->ops.write_link = ixgbe_write_i2c_combined_generic;
861 link->ops.write_link_unlocked =
862 ixgbe_write_i2c_combined_generic_unlocked;
863 link->addr = IXGBE_CS4227;
864
865 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T) {
866 mac->ops.setup_fc = NULL;
867 mac->ops.setup_eee = NULL;
868 mac->ops.init_led_link_act = NULL;
869 }
870
871 return ret_val;
872 }
873
874 /**
875 * ixgbe_dmac_config_X550
876 * @hw: pointer to hardware structure
877 *
878 * Configure DMA coalescing. When enabling, DMA coalescing is activated.
879 * When disabling, the DMAC enable bit is cleared.
880 **/
881 s32 ixgbe_dmac_config_X550(struct ixgbe_hw *hw)
882 {
883 u32 reg, high_pri_tc;
884
885 DEBUGFUNC("ixgbe_dmac_config_X550");
886
887 /* Disable DMA coalescing before configuring */
888 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
889 reg &= ~IXGBE_DMACR_DMAC_EN;
890 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
891
892 /* Disable DMA Coalescing if the watchdog timer is 0 */
893 if (!hw->mac.dmac_config.watchdog_timer)
894 goto out;
895
896 ixgbe_dmac_config_tcs_X550(hw);
897
898 /* Configure DMA Coalescing Control Register */
899 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
900
901 /* Set the watchdog timer in units of 40.96 usec */
902 reg &= ~IXGBE_DMACR_DMACWT_MASK;
903 reg |= (hw->mac.dmac_config.watchdog_timer * 100) / 4096;
904
905 reg &= ~IXGBE_DMACR_HIGH_PRI_TC_MASK;
906 /* If fcoe is enabled, set high priority traffic class */
907 if (hw->mac.dmac_config.fcoe_en) {
908 high_pri_tc = 1 << hw->mac.dmac_config.fcoe_tc;
909 reg |= ((high_pri_tc << IXGBE_DMACR_HIGH_PRI_TC_SHIFT) &
910 IXGBE_DMACR_HIGH_PRI_TC_MASK);
911 }
912 reg |= IXGBE_DMACR_EN_MNG_IND;
913
914 /* Enable DMA coalescing after configuration */
915 reg |= IXGBE_DMACR_DMAC_EN;
916 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
917
918 out:
919 return IXGBE_SUCCESS;
920 }
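/*
 * Worked example (illustrative, assuming the watchdog timer is supplied in
 * microseconds): the DMACWT field counts 40.96 usec (4096/100 usec) ticks, so
 * the conversion above is watchdog_timer * 100 / 4096.  A request of 8192 usec
 * programs 8192 * 100 / 4096 = 200 ticks, exactly 8192 usec, while 10000 usec
 * truncates to 244 ticks, roughly 9994 usec of coalescing.
 */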
921
922 /**
923 * ixgbe_dmac_config_tcs_X550
924 * @hw: pointer to hardware structure
925 *
926 * Configure DMA coalescing threshold per TC. The dmac enable bit must
927 * be cleared before configuring.
928 **/
929 s32 ixgbe_dmac_config_tcs_X550(struct ixgbe_hw *hw)
930 {
931 u32 tc, reg, pb_headroom, rx_pb_size, maxframe_size_kb;
932
933 DEBUGFUNC("ixgbe_dmac_config_tcs_X550");
934
935 /* Select the Rx packet buffer headroom based on link speed */
936 switch (hw->mac.dmac_config.link_speed) {
937 case IXGBE_LINK_SPEED_10_FULL:
938 case IXGBE_LINK_SPEED_100_FULL:
939 pb_headroom = IXGBE_DMACRXT_100M;
940 break;
941 case IXGBE_LINK_SPEED_1GB_FULL:
942 pb_headroom = IXGBE_DMACRXT_1G;
943 break;
944 default:
945 pb_headroom = IXGBE_DMACRXT_10G;
946 break;
947 }
948
949 maxframe_size_kb = ((IXGBE_READ_REG(hw, IXGBE_MAXFRS) >>
950 IXGBE_MHADD_MFS_SHIFT) / 1024);
951
952 /* Set the per Rx packet buffer receive threshold */
953 for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) {
954 reg = IXGBE_READ_REG(hw, IXGBE_DMCTH(tc));
955 reg &= ~IXGBE_DMCTH_DMACRXT_MASK;
956
957 if (tc < hw->mac.dmac_config.num_tcs) {
958 /* Get Rx PB size */
959 rx_pb_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc));
960 rx_pb_size = (rx_pb_size & IXGBE_RXPBSIZE_MASK) >>
961 IXGBE_RXPBSIZE_SHIFT;
962
963 /* Calculate receive buffer threshold in kilobytes */
964 if (rx_pb_size > pb_headroom)
965 rx_pb_size = rx_pb_size - pb_headroom;
966 else
967 rx_pb_size = 0;
968
969 /* Minimum of MFS shall be set for DMCTH */
970 reg |= (rx_pb_size > maxframe_size_kb) ?
971 rx_pb_size : maxframe_size_kb;
972 }
973 IXGBE_WRITE_REG(hw, IXGBE_DMCTH(tc), reg);
974 }
975 return IXGBE_SUCCESS;
976 }
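/*
 * Worked example (illustrative, with made-up sizes): if a traffic class owns a
 * 384 KB receive packet buffer and the link-speed headroom constant is 64 KB,
 * the loop above programs max(384 - 64, maxframe_size_kb) = 320 KB into DMCTH.
 * The comparison against maxframe_size_kb only matters when the remaining
 * buffer would be smaller than one maximum-sized frame.
 */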
977
978 /**
979 * ixgbe_dmac_update_tcs_X550
980 * @hw: pointer to hardware structure
981 *
982 * Disables dmac, updates per TC settings, and then enables dmac.
983 **/
984 s32 ixgbe_dmac_update_tcs_X550(struct ixgbe_hw *hw)
985 {
986 u32 reg;
987
988 DEBUGFUNC("ixgbe_dmac_update_tcs_X550");
989
990 /* Disable DMA coalescing before configuring */
991 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
992 reg &= ~IXGBE_DMACR_DMAC_EN;
993 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
994
995 ixgbe_dmac_config_tcs_X550(hw);
996
997 /* Enable DMA coalescing after configuration */
998 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
999 reg |= IXGBE_DMACR_DMAC_EN;
1000 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
1001
1002 return IXGBE_SUCCESS;
1003 }
1004
1005 /**
1006 * ixgbe_init_eeprom_params_X550 - Initialize EEPROM params
1007 * @hw: pointer to hardware structure
1008 *
1009 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
1010 * ixgbe_hw struct in order to set up EEPROM access.
1011 **/
1012 s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
1013 {
1014 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
1015 u32 eec;
1016 u16 eeprom_size;
1017
1018 DEBUGFUNC("ixgbe_init_eeprom_params_X550");
1019
1020 if (eeprom->type == ixgbe_eeprom_uninitialized) {
1021 eeprom->semaphore_delay = 10;
1022 eeprom->type = ixgbe_flash;
1023
1024 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1025 eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
1026 IXGBE_EEC_SIZE_SHIFT);
1027 eeprom->word_size = 1 << (eeprom_size +
1028 IXGBE_EEPROM_WORD_SIZE_SHIFT);
1029
1030 DEBUGOUT2("Eeprom params: type = %d, size = %d\n",
1031 eeprom->type, eeprom->word_size);
1032 }
1033
1034 return IXGBE_SUCCESS;
1035 }
1036
1037 /**
1038 * ixgbe_set_source_address_pruning_X550 - Enable/Disable source address pruning
1039 * @hw: pointer to hardware structure
1040 * @enable: enable or disable source address pruning
1041 * @pool: Rx pool to set source address pruning for
1042 **/
1043 void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw, bool enable,
1044 unsigned int pool)
1045 {
1046 u64 pfflp;
1047
1048 /* max rx pool is 63 */
1049 if (pool > 63)
1050 return;
1051
1052 pfflp = (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPL);
1053 pfflp |= (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPH) << 32;
1054
1055 if (enable)
1056 pfflp |= (1ULL << pool);
1057 else
1058 pfflp &= ~(1ULL << pool);
1059
1060 IXGBE_WRITE_REG(hw, IXGBE_PFFLPL, (u32)pfflp);
1061 IXGBE_WRITE_REG(hw, IXGBE_PFFLPH, (u32)(pfflp >> 32));
1062 }
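/*
 * Worked example (illustrative): the per-pool enables form one 64-bit value
 * split across PFFLPL (pools 0-31) and PFFLPH (pools 32-63).  Enabling pruning
 * for pool 40 sets bit 40 of the combined value, which lands in bit 8 of
 * IXGBE_PFFLPH on write-back; PFFLPL keeps its previous contents.
 */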
1063
1064 /**
1065 * ixgbe_set_ethertype_anti_spoofing_X550 - Configure Ethertype anti-spoofing
1066 * @hw: pointer to hardware structure
1067 * @enable: enable or disable switch for Ethertype anti-spoofing
1068 * @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing
1069 *
1070 **/
1071 void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw,
1072 bool enable, int vf)
1073 {
1074 int vf_target_reg = vf >> 3;
1075 int vf_target_shift = vf % 8 + IXGBE_SPOOF_ETHERTYPEAS_SHIFT;
1076 u32 pfvfspoof;
1077
1078 DEBUGFUNC("ixgbe_set_ethertype_anti_spoofing_X550");
1079
1080 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
1081 if (enable)
1082 pfvfspoof |= (1 << vf_target_shift);
1083 else
1084 pfvfspoof &= ~(1 << vf_target_shift);
1085
1086 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
1087 }
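/*
 * Worked example (illustrative): each PFVFSPOOF register covers eight VFs, so
 * for vf = 11 the code above selects PFVFSPOOF(11 >> 3) = PFVFSPOOF(1) and
 * toggles bit (11 % 8) + IXGBE_SPOOF_ETHERTYPEAS_SHIFT, the Ethertype
 * anti-spoof bit for the fourth VF carried by that register.
 */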
1088
1089 /**
1090 * ixgbe_iosf_wait - Wait for IOSF command completion
1091 * @hw: pointer to hardware structure
1092 * @ctrl: pointer to location to receive final IOSF control value
1093 *
1094 * Returns failing status on timeout
1095 *
1096 * Note: ctrl can be NULL if the IOSF control register value is not needed
1097 **/
1098 static s32 ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl)
1099 {
1100 u32 i, command = 0;
1101
1102 /* Check every 10 usec to see if the address cycle completed.
1103 * The SB IOSF BUSY bit will clear when the operation is
1104 * complete
1105 */
1106 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
1107 command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL);
1108 if ((command & IXGBE_SB_IOSF_CTRL_BUSY) == 0)
1109 break;
1110 usec_delay(10);
1111 }
1112 if (ctrl)
1113 *ctrl = command;
1114 if (i == IXGBE_MDIO_COMMAND_TIMEOUT) {
1115 ERROR_REPORT1(IXGBE_ERROR_POLLING, "Wait timed out\n");
1116 return IXGBE_ERR_PHY;
1117 }
1118
1119 return IXGBE_SUCCESS;
1120 }
1121
1122 /**
1123 * ixgbe_write_iosf_sb_reg_x550 - Writes a value to specified register
1124 * of the IOSF device
1125 * @hw: pointer to hardware structure
1126 * @reg_addr: 32 bit PHY register to write
1127 * @device_type: 3 bit device type
1128 * @data: Data to write to the register
1129 **/
1130 s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
1131 u32 device_type, u32 data)
1132 {
1133 u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM;
1134 u32 command, error __unused;
1135 s32 ret;
1136
1137 ret = ixgbe_acquire_swfw_semaphore(hw, gssr);
1138 if (ret != IXGBE_SUCCESS)
1139 return ret;
1140
1141 ret = ixgbe_iosf_wait(hw, NULL);
1142 if (ret != IXGBE_SUCCESS)
1143 goto out;
1144
1145 command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
1146 (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
1147
1148 /* Write IOSF control register */
1149 IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
1150
1151 /* Write IOSF data register */
1152 IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA, data);
1153
1154 ret = ixgbe_iosf_wait(hw, &command);
1155
1156 if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
1157 error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
1158 IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
1159 ERROR_REPORT2(IXGBE_ERROR_POLLING,
1160 "Failed to write, error %x\n", error);
1161 ret = IXGBE_ERR_PHY;
1162 }
1163
1164 out:
1165 ixgbe_release_swfw_semaphore(hw, gssr);
1166 return ret;
1167 }
1168
1169 /**
1170 * ixgbe_read_iosf_sb_reg_x550 - Reads specified register of the IOSF device
1171 * @hw: pointer to hardware structure
1172 * @reg_addr: 32 bit PHY register to write
1173 * @device_type: 3 bit device type
1174 * @data: pointer to location to receive the value read from the register
1175 **/
1176 s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
1177 u32 device_type, u32 *data)
1178 {
1179 u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM;
1180 u32 command, error __unused;
1181 s32 ret;
1182
1183 ret = ixgbe_acquire_swfw_semaphore(hw, gssr);
1184 if (ret != IXGBE_SUCCESS)
1185 return ret;
1186
1187 ret = ixgbe_iosf_wait(hw, NULL);
1188 if (ret != IXGBE_SUCCESS)
1189 goto out;
1190
1191 command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
1192 (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
1193
1194 /* Write IOSF control register */
1195 IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
1196
1197 ret = ixgbe_iosf_wait(hw, &command);
1198
1199 if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
1200 error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
1201 IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
1202 ERROR_REPORT2(IXGBE_ERROR_POLLING,
1203 "Failed to read, error %x\n", error);
1204 ret = IXGBE_ERR_PHY;
1205 }
1206
1207 if (ret == IXGBE_SUCCESS)
1208 *data = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA);
1209
1210 out:
1211 ixgbe_release_swfw_semaphore(hw, gssr);
1212 return ret;
1213 }
1214
1215 /**
1216 * ixgbe_get_phy_token - Get the token for shared phy access
1217 * @hw: Pointer to hardware structure
1218 */
1219
1220 s32 ixgbe_get_phy_token(struct ixgbe_hw *hw)
1221 {
1222 struct ixgbe_hic_phy_token_req token_cmd;
1223 s32 status;
1224
1225 token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
1226 token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
1227 token_cmd.hdr.cmd_or_resp.cmd_resv = 0;
1228 token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
1229 token_cmd.port_number = hw->bus.lan_id;
1230 token_cmd.command_type = FW_PHY_TOKEN_REQ;
1231 token_cmd.pad = 0;
1232 status = ixgbe_host_interface_command(hw, (u32 *)&token_cmd,
1233 sizeof(token_cmd),
1234 IXGBE_HI_COMMAND_TIMEOUT,
1235 true);
1236 if (status) {
1237 DEBUGOUT1("Issuing host interface command failed with Status = %d\n",
1238 status);
1239 return status;
1240 }
1241 if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
1242 return IXGBE_SUCCESS;
1243 if (token_cmd.hdr.cmd_or_resp.ret_status != FW_PHY_TOKEN_RETRY) {
1244 DEBUGOUT1("Host interface command returned 0x%08x , returning IXGBE_ERR_FW_RESP_INVALID\n",
1245 token_cmd.hdr.cmd_or_resp.ret_status);
1246 return IXGBE_ERR_FW_RESP_INVALID;
1247 }
1248
1249 DEBUGOUT("Returning IXGBE_ERR_TOKEN_RETRY\n");
1250 return IXGBE_ERR_TOKEN_RETRY;
1251 }
1252
1253 /**
1254 * ixgbe_put_phy_token - Put the token for shared phy access
1255 * @hw: Pointer to hardware structure
1256 */
1257
1258 s32 ixgbe_put_phy_token(struct ixgbe_hw *hw)
1259 {
1260 struct ixgbe_hic_phy_token_req token_cmd;
1261 s32 status;
1262
1263 token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
1264 token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
1265 token_cmd.hdr.cmd_or_resp.cmd_resv = 0;
1266 token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
1267 token_cmd.port_number = hw->bus.lan_id;
1268 token_cmd.command_type = FW_PHY_TOKEN_REL;
1269 token_cmd.pad = 0;
1270 status = ixgbe_host_interface_command(hw, (u32 *)&token_cmd,
1271 sizeof(token_cmd),
1272 IXGBE_HI_COMMAND_TIMEOUT,
1273 true);
1274 if (status)
1275 return status;
1276 if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
1277 return IXGBE_SUCCESS;
1278
1279 DEBUGOUT("Put PHY Token host interface command failed");
1280 return IXGBE_ERR_FW_RESP_INVALID;
1281 }
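/*
 * Usage sketch (illustrative only): the token calls bracket shared-PHY access
 * on X550EM_a parts, retrying while firmware answers FW_PHY_TOKEN_RETRY.  The
 * retry bound below is an assumption, not a documented limit.
 *
 *	u32 tries = 0;
 *	s32 st;
 *
 *	do {
 *		st = ixgbe_get_phy_token(hw);
 *	} while (st == IXGBE_ERR_TOKEN_RETRY && ++tries < 4);
 *	if (st == IXGBE_SUCCESS) {
 *		...access the shared PHY here...
 *		ixgbe_put_phy_token(hw);
 *	}
 */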
1282
1283 /**
1284 * ixgbe_disable_mdd_X550
1285 * @hw: pointer to hardware structure
1286 *
1287 * Disable malicious driver detection
1288 **/
1289 void ixgbe_disable_mdd_X550(struct ixgbe_hw *hw)
1290 {
1291 u32 reg;
1292
1293 DEBUGFUNC("ixgbe_disable_mdd_X550");
1294
1295 /* Disable MDD for TX DMA and interrupt */
1296 reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1297 reg &= ~(IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN);
1298 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
1299
1300 /* Disable MDD for RX and interrupt */
1301 reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
1302 reg &= ~(IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN);
1303 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg);
1304 }
1305
1306 /**
1307 * ixgbe_enable_mdd_X550
1308 * @hw: pointer to hardware structure
1309 *
1310 * Enable malicious driver detection
1311 **/
1312 void ixgbe_enable_mdd_X550(struct ixgbe_hw *hw)
1313 {
1314 u32 reg;
1315
1316 DEBUGFUNC("ixgbe_enable_mdd_X550");
1317
1318 /* Enable MDD for TX DMA and interrupt */
1319 reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1320 reg |= (IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN);
1321 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
1322
1323 /* Enable MDD for RX and interrupt */
1324 reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
1325 reg |= (IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN);
1326 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg);
1327 }
1328
1329 /**
1330 * ixgbe_restore_mdd_vf_X550
1331 * @hw: pointer to hardware structure
1332 * @vf: vf index
1333 *
1334 * Restore VF that was disabled during malicious driver detection event
1335 **/
1336 void ixgbe_restore_mdd_vf_X550(struct ixgbe_hw *hw, u32 vf)
1337 {
1338 u32 idx, reg, num_qs, start_q, bitmask;
1339
1340 DEBUGFUNC("ixgbe_restore_mdd_vf_X550");
1341
1342 /* Map VF to queues */
1343 reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
1344 switch (reg & IXGBE_MRQC_MRQE_MASK) {
1345 case IXGBE_MRQC_VMDQRT8TCEN:
1346 num_qs = 8; /* 16 VFs / pools */
1347 bitmask = 0x000000FF;
1348 break;
1349 case IXGBE_MRQC_VMDQRSS32EN:
1350 case IXGBE_MRQC_VMDQRT4TCEN:
1351 num_qs = 4; /* 32 VFs / pools */
1352 bitmask = 0x0000000F;
1353 break;
1354 default: /* 64 VFs / pools */
1355 num_qs = 2;
1356 bitmask = 0x00000003;
1357 break;
1358 }
1359 start_q = vf * num_qs;
1360
1361 /* Release vf's queues by clearing WQBR_TX and WQBR_RX (RW1C) */
1362 idx = start_q / 32;
1363 reg = 0;
1364 reg |= (bitmask << (start_q % 32));
1365 IXGBE_WRITE_REG(hw, IXGBE_WQBR_TX(idx), reg);
1366 IXGBE_WRITE_REG(hw, IXGBE_WQBR_RX(idx), reg);
1367 }
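/*
 * Worked example (illustrative): with IXGBE_MRQC_VMDQRSS32EN (32 pools,
 * num_qs = 4, bitmask = 0xF), restoring vf = 10 yields start_q = 40, so the
 * code writes 0xF << (40 % 32) = 0xF00 to WQBR_TX(1) and WQBR_RX(1), clearing
 * the RW1C block bits for queues 40-43.
 */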
1368
1369 /**
1370 * ixgbe_mdd_event_X550
1371 * @hw: pointer to hardware structure
1372 * @vf_bitmap: vf bitmap of malicious vfs
1373 *
1374 * Handle malicious driver detection event.
1375 **/
1376 void ixgbe_mdd_event_X550(struct ixgbe_hw *hw, u32 *vf_bitmap)
1377 {
1378 u32 wqbr;
1379 u32 i, j, reg, q, shift, vf, idx;
1380
1381 DEBUGFUNC("ixgbe_mdd_event_X550");
1382
1383 /* figure out pool size for mapping to vf's */
1384 reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
1385 switch (reg & IXGBE_MRQC_MRQE_MASK) {
1386 case IXGBE_MRQC_VMDQRT8TCEN:
1387 shift = 3; /* 16 VFs / pools */
1388 break;
1389 case IXGBE_MRQC_VMDQRSS32EN:
1390 case IXGBE_MRQC_VMDQRT4TCEN:
1391 shift = 2; /* 32 VFs / pools */
1392 break;
1393 default:
1394 shift = 1; /* 64 VFs / pools */
1395 break;
1396 }
1397
1398 /* Read WQBR_TX and WQBR_RX and check for malicious queues */
1399 for (i = 0; i < 4; i++) {
1400 wqbr = IXGBE_READ_REG(hw, IXGBE_WQBR_TX(i));
1401 wqbr |= IXGBE_READ_REG(hw, IXGBE_WQBR_RX(i));
1402
1403 if (!wqbr)
1404 continue;
1405
1406 /* Get malicious queue */
1407 for (j = 0; j < 32 && wqbr; j++) {
1408
1409 if (!(wqbr & (1 << j)))
1410 continue;
1411
1412 /* Get queue from bitmask */
1413 q = j + (i * 32);
1414
1415 /* Map queue to vf */
1416 vf = (q >> shift);
1417
1418 /* Set vf bit in vf_bitmap */
1419 idx = vf / 32;
1420 vf_bitmap[idx] |= (1 << (vf % 32));
1421 wqbr &= ~(1 << j);
1422 }
1423 }
1424 }
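/*
 * Worked example (illustrative): with 32 pools (shift = 2), a blocked-queue
 * bit 5 reported in WQBR_RX(1) maps to queue q = 5 + 32 = 37, hence
 * vf = 37 >> 2 = 9, and the caller sees bit 9 set in vf_bitmap[0].
 */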
1425
1426 /**
1427 * ixgbe_get_media_type_X550em - Get media type
1428 * @hw: pointer to hardware structure
1429 *
1430 * Returns the media type (fiber, copper, backplane)
1431 */
1432 enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
1433 {
1434 enum ixgbe_media_type media_type;
1435
1436 DEBUGFUNC("ixgbe_get_media_type_X550em");
1437
1438 /* Detect if there is a copper PHY attached. */
1439 switch (hw->device_id) {
1440 case IXGBE_DEV_ID_X550EM_X_KR:
1441 case IXGBE_DEV_ID_X550EM_X_KX4:
1442 case IXGBE_DEV_ID_X550EM_X_XFI:
1443 case IXGBE_DEV_ID_X550EM_A_KR:
1444 case IXGBE_DEV_ID_X550EM_A_KR_L:
1445 media_type = ixgbe_media_type_backplane;
1446 break;
1447 case IXGBE_DEV_ID_X550EM_X_SFP:
1448 case IXGBE_DEV_ID_X550EM_A_SFP:
1449 case IXGBE_DEV_ID_X550EM_A_SFP_N:
1450 case IXGBE_DEV_ID_X550EM_A_QSFP:
1451 case IXGBE_DEV_ID_X550EM_A_QSFP_N:
1452 media_type = ixgbe_media_type_fiber;
1453 break;
1454 case IXGBE_DEV_ID_X550EM_X_1G_T:
1455 case IXGBE_DEV_ID_X550EM_X_10G_T:
1456 case IXGBE_DEV_ID_X550EM_A_10G_T:
1457 media_type = ixgbe_media_type_copper;
1458 break;
1459 case IXGBE_DEV_ID_X550EM_A_SGMII:
1460 case IXGBE_DEV_ID_X550EM_A_SGMII_L:
1461 media_type = ixgbe_media_type_backplane;
1462 hw->phy.type = ixgbe_phy_sgmii;
1463 break;
1464 case IXGBE_DEV_ID_X550EM_A_1G_T:
1465 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
1466 media_type = ixgbe_media_type_copper;
1467 break;
1468 default:
1469 media_type = ixgbe_media_type_unknown;
1470 break;
1471 }
1472 return media_type;
1473 }
1474
1475 /**
1476 * ixgbe_supported_sfp_modules_X550em - Check if SFP module type is supported
1477 * @hw: pointer to hardware structure
1478 * @linear: true if SFP module is linear
1479 */
1480 static s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear)
1481 {
1482 DEBUGFUNC("ixgbe_supported_sfp_modules_X550em");
1483
1484 switch (hw->phy.sfp_type) {
1485 case ixgbe_sfp_type_not_present:
1486 return IXGBE_ERR_SFP_NOT_PRESENT;
1487 case ixgbe_sfp_type_da_cu_core0:
1488 case ixgbe_sfp_type_da_cu_core1:
1489 *linear = true;
1490 break;
1491 case ixgbe_sfp_type_srlr_core0:
1492 case ixgbe_sfp_type_srlr_core1:
1493 case ixgbe_sfp_type_da_act_lmt_core0:
1494 case ixgbe_sfp_type_da_act_lmt_core1:
1495 case ixgbe_sfp_type_1g_sx_core0:
1496 case ixgbe_sfp_type_1g_sx_core1:
1497 case ixgbe_sfp_type_1g_lx_core0:
1498 case ixgbe_sfp_type_1g_lx_core1:
1499 case ixgbe_sfp_type_1g_bx_core0:
1500 case ixgbe_sfp_type_1g_bx_core1:
1501 *linear = false;
1502 break;
1503 case ixgbe_sfp_type_unknown:
1504 case ixgbe_sfp_type_1g_cu_core0:
1505 case ixgbe_sfp_type_1g_cu_core1:
1506 default:
1507 return IXGBE_ERR_SFP_NOT_SUPPORTED;
1508 }
1509
1510 return IXGBE_SUCCESS;
1511 }
1512
1513 /**
1514 * ixgbe_identify_sfp_module_X550em - Identifies SFP modules
1515 * @hw: pointer to hardware structure
1516 *
1517 * Searches for and identifies the SFP module and assigns appropriate PHY type.
1518 **/
1519 s32 ixgbe_identify_sfp_module_X550em(struct ixgbe_hw *hw)
1520 {
1521 s32 status;
1522 bool linear;
1523
1524 DEBUGFUNC("ixgbe_identify_sfp_module_X550em");
1525
1526 status = ixgbe_identify_module_generic(hw);
1527
1528 if (status != IXGBE_SUCCESS)
1529 return status;
1530
1531 /* Check if SFP module is supported */
1532 status = ixgbe_supported_sfp_modules_X550em(hw, &linear);
1533
1534 return status;
1535 }
1536
1537 /**
1538 * ixgbe_setup_sfp_modules_X550em - Setup MAC link ops
1539 * @hw: pointer to hardware structure
1540 */
1541 s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw)
1542 {
1543 s32 status;
1544 bool linear;
1545
1546 DEBUGFUNC("ixgbe_setup_sfp_modules_X550em");
1547
1548 /* Check if SFP module is supported */
1549 status = ixgbe_supported_sfp_modules_X550em(hw, &linear);
1550
1551 if (status != IXGBE_SUCCESS)
1552 return status;
1553
1554 ixgbe_init_mac_link_ops_X550em(hw);
1555 hw->phy.ops.reset = NULL;
1556
1557 return IXGBE_SUCCESS;
1558 }
1559
1560 /**
1561 * ixgbe_restart_an_internal_phy_x550em - restart autonegotiation for the
1562 * internal PHY
1563 * @hw: pointer to hardware structure
1564 **/
1565 static s32 ixgbe_restart_an_internal_phy_x550em(struct ixgbe_hw *hw)
1566 {
1567 s32 status;
1568 u32 link_ctrl;
1569
1570 /* Restart auto-negotiation. */
1571 status = hw->mac.ops.read_iosf_sb_reg(hw,
1572 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1573 IXGBE_SB_IOSF_TARGET_KR_PHY, &link_ctrl);
1574
1575 if (status) {
1576 DEBUGOUT("Auto-negotiation did not complete\n");
1577 return status;
1578 }
1579
1580 link_ctrl |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
1581 status = hw->mac.ops.write_iosf_sb_reg(hw,
1582 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1583 IXGBE_SB_IOSF_TARGET_KR_PHY, link_ctrl);
1584
1585 if (hw->mac.type == ixgbe_mac_X550EM_a) {
1586 u32 flx_mask_st20;
1587
1588 /* Indicate to FW that AN restart has been asserted */
1589 status = hw->mac.ops.read_iosf_sb_reg(hw,
1590 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1591 IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_mask_st20);
1592
1593 if (status) {
1594 DEBUGOUT("Auto-negotiation did not complete\n");
1595 return status;
1596 }
1597
1598 flx_mask_st20 |= IXGBE_KRM_PMD_FLX_MASK_ST20_FW_AN_RESTART;
1599 status = hw->mac.ops.write_iosf_sb_reg(hw,
1600 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1601 IXGBE_SB_IOSF_TARGET_KR_PHY, flx_mask_st20);
1602 }
1603
1604 return status;
1605 }
1606
1607 /**
1608 * ixgbe_setup_sgmii - Set up link for sgmii
1609 * @hw: pointer to hardware structure
1610 * @speed: new link speed
1611 * @autoneg_wait: true when waiting for completion is needed
1612 */
1613 static s32 ixgbe_setup_sgmii(struct ixgbe_hw *hw, ixgbe_link_speed speed,
1614 bool autoneg_wait)
1615 {
1616 struct ixgbe_mac_info *mac = &hw->mac;
1617 u32 lval, sval, flx_val;
1618 s32 rc;
1619
1620 rc = mac->ops.read_iosf_sb_reg(hw,
1621 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1622 IXGBE_SB_IOSF_TARGET_KR_PHY, &lval);
1623 if (rc)
1624 return rc;
1625
1626 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
1627 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
1628 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN;
1629 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN;
1630 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
1631 rc = mac->ops.write_iosf_sb_reg(hw,
1632 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1633 IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
1634 if (rc)
1635 return rc;
1636
1637 rc = mac->ops.read_iosf_sb_reg(hw,
1638 IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
1639 IXGBE_SB_IOSF_TARGET_KR_PHY, &sval);
1640 if (rc)
1641 return rc;
1642
1643 sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D;
1644 sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D;
1645 rc = mac->ops.write_iosf_sb_reg(hw,
1646 IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
1647 IXGBE_SB_IOSF_TARGET_KR_PHY, sval);
1648 if (rc)
1649 return rc;
1650
1651 rc = mac->ops.read_iosf_sb_reg(hw,
1652 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1653 IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val);
1654 if (rc)
1655 return rc;
1656
1657 flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
1658 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G;
1659 flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
1660 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
1661 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
1662
1663 rc = mac->ops.write_iosf_sb_reg(hw,
1664 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1665 IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val);
1666 if (rc)
1667 return rc;
1668
1669 rc = ixgbe_restart_an_internal_phy_x550em(hw);
1670 if (rc)
1671 return rc;
1672
1673 return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
1674 }
1675
1676 /**
1677 * ixgbe_setup_sgmii_fw - Set up link for internal PHY SGMII auto-negotiation
1678 * @hw: pointer to hardware structure
1679 * @speed: new link speed
1680 * @autoneg_wait: true when waiting for completion is needed
1681 */
1682 static s32 ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed,
1683 bool autoneg_wait)
1684 {
1685 struct ixgbe_mac_info *mac = &hw->mac;
1686 u32 lval, sval, flx_val;
1687 s32 rc;
1688
1689 rc = mac->ops.read_iosf_sb_reg(hw,
1690 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1691 IXGBE_SB_IOSF_TARGET_KR_PHY, &lval);
1692 if (rc)
1693 return rc;
1694
1695 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
1696 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
1697 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN;
1698 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN;
1699 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
1700 rc = mac->ops.write_iosf_sb_reg(hw,
1701 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1702 IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
1703 if (rc)
1704 return rc;
1705
1706 rc = mac->ops.read_iosf_sb_reg(hw,
1707 IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
1708 IXGBE_SB_IOSF_TARGET_KR_PHY, &sval);
1709 if (rc)
1710 return rc;
1711
1712 sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D;
1713 sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D;
1714 rc = mac->ops.write_iosf_sb_reg(hw,
1715 IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
1716 IXGBE_SB_IOSF_TARGET_KR_PHY, sval);
1717 if (rc)
1718 return rc;
1719
1720 rc = mac->ops.write_iosf_sb_reg(hw,
1721 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1722 IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
1723 if (rc)
1724 return rc;
1725
1726 rc = mac->ops.read_iosf_sb_reg(hw,
1727 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1728 IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val);
1729 if (rc)
1730 return rc;
1731
1732 flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
1733 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN;
1734 flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
1735 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
1736 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
1737
1738 rc = mac->ops.write_iosf_sb_reg(hw,
1739 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1740 IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val);
1741 if (rc)
1742 return rc;
1743
1744 rc = ixgbe_restart_an_internal_phy_x550em(hw);
1745
1746 return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
1747 }
1748
1749 /**
1750 * ixgbe_init_mac_link_ops_X550em - init mac link function pointers
1751 * @hw: pointer to hardware structure
1752 */
1753 void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
1754 {
1755 struct ixgbe_mac_info *mac = &hw->mac;
1756
1757 DEBUGFUNC("ixgbe_init_mac_link_ops_X550em");
1758
1759 switch (hw->mac.ops.get_media_type(hw)) {
1760 case ixgbe_media_type_fiber:
1761 /* CS4227 does not support autoneg, so disable the laser control
1762 * functions for SFP+ fiber
1763 */
1764 mac->ops.disable_tx_laser = NULL;
1765 mac->ops.enable_tx_laser = NULL;
1766 mac->ops.flap_tx_laser = NULL;
1767 mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber;
1768 mac->ops.set_rate_select_speed =
1769 ixgbe_set_soft_rate_select_speed;
1770
1771 if ((hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) ||
1772 (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP))
1773 mac->ops.setup_mac_link =
1774 ixgbe_setup_mac_link_sfp_x550a;
1775 else
1776 mac->ops.setup_mac_link =
1777 ixgbe_setup_mac_link_sfp_x550em;
1778 break;
1779 case ixgbe_media_type_copper:
1780 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T)
1781 break;
1782 if (hw->mac.type == ixgbe_mac_X550EM_a) {
1783 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
1784 hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) {
1785 mac->ops.setup_link = ixgbe_setup_sgmii_fw;
1786 mac->ops.check_link =
1787 ixgbe_check_mac_link_generic;
1788 } else {
1789 mac->ops.setup_link =
1790 ixgbe_setup_mac_link_t_X550em;
1791 }
1792 } else {
1793 mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em;
1794 mac->ops.check_link = ixgbe_check_link_t_X550em;
1795 }
1796 break;
1797 case ixgbe_media_type_backplane:
1798 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII ||
1799 hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L)
1800 mac->ops.setup_link = ixgbe_setup_sgmii;
1801 break;
1802 default:
1803 break;
1804 }
1805 }
1806
1807 /**
1808 * ixgbe_get_link_capabilities_X550em - Determines link capabilities
1809 * @hw: pointer to hardware structure
1810 * @speed: pointer to link speed
1811 * @autoneg: true when autoneg or autotry is enabled
1812 */
1813 s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
1814 ixgbe_link_speed *speed,
1815 bool *autoneg)
1816 {
1817 DEBUGFUNC("ixgbe_get_link_capabilities_X550em");
1818
1819
1820 if (hw->phy.type == ixgbe_phy_fw) {
1821 *autoneg = true;
1822 *speed = hw->phy.speeds_supported;
1823 return 0;
1824 }
1825
1826 /* SFP */
1827 if (hw->phy.media_type == ixgbe_media_type_fiber) {
1828
1829 /* CS4227 SFP must not enable auto-negotiation */
1830 *autoneg = false;
1831
1832 /* Check if 1G SFP module. */
1833 if (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
1834 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1 ||
1835 hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
1836 hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
1837 hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core0 ||
1838 hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core1) {
1839 *speed = IXGBE_LINK_SPEED_1GB_FULL;
1840 return IXGBE_SUCCESS;
1841 }
1842
1843 /* Link capabilities are based on SFP */
1844 if (hw->phy.multispeed_fiber)
1845 *speed = IXGBE_LINK_SPEED_10GB_FULL |
1846 IXGBE_LINK_SPEED_1GB_FULL;
1847 else
1848 *speed = IXGBE_LINK_SPEED_10GB_FULL;
1849 } else {
1850 *autoneg = true;
1851
1852 switch (hw->phy.type) {
1853 case ixgbe_phy_x550em_xfi:
1854 *speed = IXGBE_LINK_SPEED_1GB_FULL |
1855 IXGBE_LINK_SPEED_10GB_FULL;
1856 *autoneg = false;
1857 break;
1858 case ixgbe_phy_ext_1g_t:
1859 case ixgbe_phy_sgmii:
1860 *speed = IXGBE_LINK_SPEED_1GB_FULL;
1861 break;
1862 case ixgbe_phy_x550em_kr:
1863 if (hw->mac.type == ixgbe_mac_X550EM_a) {
1864 /* check different backplane modes */
1865 if (hw->phy.nw_mng_if_sel &
1866 IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G) {
1867 *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
1868 break;
1869 } else if (hw->device_id ==
1870 IXGBE_DEV_ID_X550EM_A_KR_L) {
1871 *speed = IXGBE_LINK_SPEED_1GB_FULL;
1872 break;
1873 }
1874 }
1875 *speed = IXGBE_LINK_SPEED_10GB_FULL |
1876 IXGBE_LINK_SPEED_1GB_FULL;
1877 break;
1878 default:
1879 *speed = IXGBE_LINK_SPEED_10GB_FULL |
1880 IXGBE_LINK_SPEED_1GB_FULL;
1881 break;
1882 }
1883 }
1884
1885 return IXGBE_SUCCESS;
1886 }
1887
1888 /**
1889 * ixgbe_get_lasi_ext_t_x550em - Determine external Base T PHY interrupt cause
1890 * @hw: pointer to hardware structure
1891 * @lsc: pointer to boolean flag which indicates whether external Base T
1892 * PHY interrupt is lsc
1893 *
1894 * Determine if external Base T PHY interrupt cause is high temperature
1895 * failure alarm or link status change.
1896 *
1897 * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature
1898 * failure alarm, else return PHY access status.
1899 */
1900 static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc)
1901 {
1902 u32 status;
1903 u16 reg;
1904
1905 *lsc = false;
1906
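/* Walk the PHY alarm hierarchy top-down: the chip-wide standard flag,
 * then the vendor/global alarm flags, global alarm 1 (temperature and
 * device fault), and finally the AN vendor TX alarm for link changes.
 */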
1907 /* Vendor alarm triggered */
1908 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG,
1909 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
1910 &reg);
1911
1912 if (status != IXGBE_SUCCESS ||
1913 !(reg & IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN))
1914 return status;
1915
1916 /* Vendor Auto-Neg alarm triggered or Global alarm 1 triggered */
1917 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_FLAG,
1918 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
1919 &reg);
1920
1921 if (status != IXGBE_SUCCESS ||
1922 !(reg & (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN |
1923 IXGBE_MDIO_GLOBAL_ALARM_1_INT)))
1924 return status;
1925
1926 /* Global alarm triggered */
1927 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_ALARM_1,
1928 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
1929 &reg);
1930
1931 if (status != IXGBE_SUCCESS)
1932 return status;
1933
1934 /* If high temperature failure, then return over temp error and exit */
1935 if (reg & IXGBE_MDIO_GLOBAL_ALM_1_HI_TMP_FAIL) {
1936 /* power down the PHY in case the PHY FW didn't already */
1937 ixgbe_set_copper_phy_power(hw, false);
1938 return IXGBE_ERR_OVERTEMP;
1939 } else if (reg & IXGBE_MDIO_GLOBAL_ALM_1_DEV_FAULT) {
1940 /* device fault alarm triggered */
1941 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_FAULT_MSG,
1942 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
1943 &reg);
1944
1945 if (status != IXGBE_SUCCESS)
1946 return status;
1947
1948 /* if device fault was due to high temp alarm handle and exit */
1949 if (reg == IXGBE_MDIO_GLOBAL_FAULT_MSG_HI_TMP) {
1950 /* power down the PHY in case the PHY FW didn't */
1951 ixgbe_set_copper_phy_power(hw, false);
1952 return IXGBE_ERR_OVERTEMP;
1953 }
1954 }
1955
1956 /* Vendor alarm 2 triggered */
1957 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG,
1958 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
1959
1960 if (status != IXGBE_SUCCESS ||
1961 !(reg & IXGBE_MDIO_GLOBAL_STD_ALM2_INT))
1962 return status;
1963
1964 /* link connect/disconnect event occurred */
1965 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM2,
1966 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
1967
1968 if (status != IXGBE_SUCCESS)
1969 return status;
1970
1971 /* Indicate LSC */
1972 if (reg & IXGBE_MDIO_AUTO_NEG_VEN_LSC)
1973 *lsc = true;
1974
1975 return IXGBE_SUCCESS;
1976 }
1977
1978 /**
1979 * ixgbe_enable_lasi_ext_t_x550em - Enable external Base T PHY interrupts
1980 * @hw: pointer to hardware structure
1981 *
1982 * Enable link status change and temperature failure alarm for the external
1983 * Base T PHY
1984 *
1985 * Returns PHY access status
1986 */
1987 static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw)
1988 {
1989 u32 status;
1990 u16 reg;
1991 bool lsc;
1992
1993 /* Clear interrupt flags */
1994 status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
1995
1996 /* Enable link status change alarm */
1997
1998 /* Enable the LASI interrupts on X552 devices to receive notifications
1999 * of the link configurations of the external PHY and correspondingly
2000 * support the configuration of the internal iXFI link, since iXFI does
2001 * not support auto-negotiation. This is not required for X553 devices,
2002 * which have KR support: KR performs auto-negotiation and is used as
2003 * the internal link to the external PHY. Hence the check here avoids
2004 * enabling LASI interrupts for X553 devices.
2005 */
2006 if (hw->mac.type != ixgbe_mac_X550EM_a) {
2007 status = hw->phy.ops.read_reg(hw,
2008 IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
2009 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
2010
2011 if (status != IXGBE_SUCCESS)
2012 return status;
2013
2014 reg |= IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN;
2015
2016 status = hw->phy.ops.write_reg(hw,
2017 IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
2018 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg);
2019
2020 if (status != IXGBE_SUCCESS)
2021 return status;
2022 }
2023
2024 /* Enable high temperature failure and global fault alarms */
2025 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK,
2026 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2027 &reg);
2028
2029 if (status != IXGBE_SUCCESS)
2030 return status;
2031
2032 reg |= (IXGBE_MDIO_GLOBAL_INT_HI_TEMP_EN |
2033 IXGBE_MDIO_GLOBAL_INT_DEV_FAULT_EN);
2034
2035 status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK,
2036 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2037 reg);
2038
2039 if (status != IXGBE_SUCCESS)
2040 return status;
2041
2042 /* Enable vendor Auto-Neg alarm and Global Interrupt Mask 1 alarm */
2043 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK,
2044 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2045 &reg);
2046
2047 if (status != IXGBE_SUCCESS)
2048 return status;
2049
2050 reg |= (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN |
2051 IXGBE_MDIO_GLOBAL_ALARM_1_INT);
2052
2053 status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK,
2054 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2055 reg);
2056
2057 if (status != IXGBE_SUCCESS)
2058 return status;
2059
2060 /* Enable chip-wide vendor alarm */
2061 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK,
2062 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2063 &reg);
2064
2065 if (status != IXGBE_SUCCESS)
2066 return status;
2067
2068 reg |= IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN;
2069
2070 status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK,
2071 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2072 reg);
2073
2074 return status;
2075 }
2076
2077 /**
2078 * ixgbe_setup_kr_speed_x550em - Configure the KR PHY for link speed.
2079 * @hw: pointer to hardware structure
2080 * @speed: link speed
2081 *
2082 * Configures the integrated KR PHY.
2083 **/
2084 static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
2085 ixgbe_link_speed speed)
2086 {
2087 s32 status;
2088 u32 reg_val;
2089
2090 status = hw->mac.ops.read_iosf_sb_reg(hw,
2091 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
2092 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2093 if (status)
2094 return status;
2095
2096 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
2097 reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR |
2098 IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX);
2099
2100 /* Advertise 10G support. */
2101 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
2102 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR;
2103
2104 /* Advertise 1G support. */
2105 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
2106 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX;
2107
2108 status = hw->mac.ops.write_iosf_sb_reg(hw,
2109 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
2110 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2111
2112 if (hw->mac.type == ixgbe_mac_X550EM_a) {
2113 /* Set lane mode to KR auto negotiation */
2114 status = hw->mac.ops.read_iosf_sb_reg(hw,
2115 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2116 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2117
2118 if (status)
2119 return status;
2120
2121 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
2122 reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN;
2123 reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
2124 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
2125 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
2126
2127 status = hw->mac.ops.write_iosf_sb_reg(hw,
2128 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2129 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2130 }
2131
2132 return ixgbe_restart_an_internal_phy_x550em(hw);
2133 }
2134
2135 /**
2136 * ixgbe_reset_phy_fw - Reset firmware-controlled PHYs
2137 * @hw: pointer to hardware structure
2138 */
2139 static s32 ixgbe_reset_phy_fw(struct ixgbe_hw *hw)
2140 {
2141 u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 };
2142 s32 rc;
2143
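/* Skip the reset if PHY resets are disabled or currently blocked,
 * e.g. while manageability firmware is using the PHY.
 */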
2144 if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
2145 return IXGBE_SUCCESS;
2146
2147 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_PHY_SW_RESET, &store);
2148 if (rc)
2149 return rc;
2150 memset(store, 0, sizeof(store));
2151
2152 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_INIT_PHY, &store);
2153 if (rc)
2154 return rc;
2155
2156 return ixgbe_setup_fw_link(hw);
2157 }
2158
2159 /**
2160 * ixgbe_check_overtemp_fw - Check firmware-controlled PHYs for overtemp
2161 * @hw: pointer to hardware structure
2162 */
2163 static s32 ixgbe_check_overtemp_fw(struct ixgbe_hw *hw)
2164 {
2165 u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 };
2166 s32 rc;
2167
2168 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &store);
2169 if (rc)
2170 return rc;
2171
2172 if (store[0] & FW_PHY_ACT_GET_LINK_INFO_TEMP) {
2173 ixgbe_shutdown_fw_phy(hw);
2174 return IXGBE_ERR_OVERTEMP;
2175 }
2176 return IXGBE_SUCCESS;
2177 }
2178
2179 /**
2180 * ixgbe_read_mng_if_sel_x550em - Read NW_MNG_IF_SEL register
2181 * @hw: pointer to hardware structure
2182 *
2183 * Read NW_MNG_IF_SEL register and save field values, and check for valid field
2184 * values.
2185 **/
2186 static s32 ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw)
2187 {
2188 /* Save NW management interface connected on board. This is used
2189 * to determine internal PHY mode.
2190 */
2191 hw->phy.nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL);
2192
2193 /* If X552 (X550EM_a) and MDIO is connected to external PHY, then set
2194 * PHY address. This register field has only been used for X552.
2195 */
2196 if (hw->mac.type == ixgbe_mac_X550EM_a &&
2197 hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_MDIO_ACT) {
2198 hw->phy.addr = (hw->phy.nw_mng_if_sel &
2199 IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >>
2200 IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT;
2201 }
2202
2203 return IXGBE_SUCCESS;
2204 }
2205
2206 /**
2207 * ixgbe_init_phy_ops_X550em - PHY/SFP specific init
2208 * @hw: pointer to hardware structure
2209 *
2210 * Initialize any function pointers that were not able to be
2211 * set during init_shared_code because the PHY/SFP type was
2212 * not known. Perform the SFP init if necessary.
2213 */
2214 s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
2215 {
2216 struct ixgbe_phy_info *phy = &hw->phy;
2217 s32 ret_val;
2218
2219 DEBUGFUNC("ixgbe_init_phy_ops_X550em");
2220
2221 hw->mac.ops.set_lan_id(hw);
2222 ixgbe_read_mng_if_sel_x550em(hw);
2223
2224 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) {
2225 phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
2226 ixgbe_setup_mux_ctl(hw);
2227 phy->ops.identify_sfp = ixgbe_identify_sfp_module_X550em;
2228 }
2229
2230 switch (hw->device_id) {
2231 case IXGBE_DEV_ID_X550EM_A_1G_T:
2232 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
2233 phy->ops.read_reg_mdi = NULL;
2234 phy->ops.write_reg_mdi = NULL;
2235 hw->phy.ops.read_reg = NULL;
2236 hw->phy.ops.write_reg = NULL;
2237 phy->ops.check_overtemp = ixgbe_check_overtemp_fw;
2238 if (hw->bus.lan_id)
2239 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
2240 else
2241 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM;
2242
2243 break;
2244 case IXGBE_DEV_ID_X550EM_A_10G_T:
2245 case IXGBE_DEV_ID_X550EM_A_SFP:
2246 hw->phy.ops.read_reg = ixgbe_read_phy_reg_x550a;
2247 hw->phy.ops.write_reg = ixgbe_write_phy_reg_x550a;
2248 if (hw->bus.lan_id)
2249 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
2250 else
2251 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM;
2252 break;
2253 case IXGBE_DEV_ID_X550EM_X_SFP:
2254 /* set up for CS4227 usage */
2255 hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
2256 break;
2257 case IXGBE_DEV_ID_X550EM_X_1G_T:
2258 phy->ops.read_reg_mdi = NULL;
2259 phy->ops.write_reg_mdi = NULL;
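/* FALLTHROUGH */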
2260 default:
2261 break;
2262 }
2263
2264 /* Identify the PHY or SFP module */
2265 ret_val = phy->ops.identify(hw);
2266 if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED ||
2267 ret_val == IXGBE_ERR_PHY_ADDR_INVALID)
2268 return ret_val;
2269
2270 /* Setup function pointers based on detected hardware */
2271 ixgbe_init_mac_link_ops_X550em(hw);
2272 if (phy->sfp_type != ixgbe_sfp_type_unknown)
2273 phy->ops.reset = NULL;
2274
2275 /* Set functions pointers based on phy type */
2276 switch (hw->phy.type) {
2277 case ixgbe_phy_x550em_kx4:
2278 phy->ops.setup_link = NULL;
2279 phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
2280 phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
2281 break;
2282 case ixgbe_phy_x550em_kr:
2283 phy->ops.setup_link = ixgbe_setup_kr_x550em;
2284 phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
2285 phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
2286 break;
2287 case ixgbe_phy_ext_1g_t:
2288 /* link is managed by FW */
2289 phy->ops.setup_link = NULL;
2290 phy->ops.reset = NULL;
2291 break;
2292 case ixgbe_phy_x550em_xfi:
2293 /* link is managed by HW */
2294 phy->ops.setup_link = NULL;
2295 phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
2296 phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
2297 break;
2298 case ixgbe_phy_x550em_ext_t:
2299 /* If internal link mode is XFI, then setup iXFI internal link,
2300 * else setup KR now.
2301 */
2302 phy->ops.setup_internal_link =
2303 ixgbe_setup_internal_phy_t_x550em;
2304
2305 /* setup SW LPLU only for first revision of X550EM_x */
2306 if ((hw->mac.type == ixgbe_mac_X550EM_x) &&
2307 !(IXGBE_FUSES0_REV_MASK &
2308 IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0))))
2309 phy->ops.enter_lplu = ixgbe_enter_lplu_t_x550em;
2310
2311 phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em;
2312 phy->ops.reset = ixgbe_reset_phy_t_X550em;
2313 break;
2314 case ixgbe_phy_sgmii:
2315 phy->ops.setup_link = NULL;
2316 break;
2317 case ixgbe_phy_fw:
2318 phy->ops.setup_link = ixgbe_setup_fw_link;
2319 phy->ops.reset = ixgbe_reset_phy_fw;
2320 break;
2321 default:
2322 break;
2323 }
2324 return ret_val;
2325 }
2326
2327 /**
2328 * ixgbe_set_mdio_speed - Set MDIO clock speed
2329 * @hw: pointer to hardware structure
2330 */
2331 static void ixgbe_set_mdio_speed(struct ixgbe_hw *hw)
2332 {
2333 u32 hlreg0;
2334
2335 switch (hw->device_id) {
2336 case IXGBE_DEV_ID_X550EM_X_10G_T:
2337 case IXGBE_DEV_ID_X550EM_A_SGMII:
2338 case IXGBE_DEV_ID_X550EM_A_SGMII_L:
2339 case IXGBE_DEV_ID_X550EM_A_10G_T:
2340 case IXGBE_DEV_ID_X550EM_A_SFP:
2341 case IXGBE_DEV_ID_X550EM_A_QSFP:
2342 /* Config MDIO clock speed before the first MDIO PHY access */
2343 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2344 hlreg0 &= ~IXGBE_HLREG0_MDCSPD;
2345 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
2346 break;
2347 case IXGBE_DEV_ID_X550EM_A_1G_T:
2348 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
2349 /* Select fast MDIO clock speed for these devices */
2350 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2351 hlreg0 |= IXGBE_HLREG0_MDCSPD;
2352 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
2353 break;
2354 default:
2355 break;
2356 }
2357 }
2358
2359 /**
2360 * ixgbe_reset_hw_X550em - Perform hardware reset
2361 * @hw: pointer to hardware structure
2362 *
2363 * Resets the hardware by resetting the transmit and receive units, masks
2364 * and clears all interrupts, performs a PHY reset, and performs a link (MAC)
2365 * reset.
2366 */
2367 s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
2368 {
2369 ixgbe_link_speed link_speed;
2370 s32 status;
2371 u32 ctrl = 0;
2372 u32 i;
2373 bool link_up = false;
2374 u32 swfw_mask = hw->phy.phy_semaphore_mask;
2375
2376 DEBUGFUNC("ixgbe_reset_hw_X550em");
2377
2378 /* Call adapter stop to disable Tx/Rx and clear interrupts */
2379 status = hw->mac.ops.stop_adapter(hw);
2380 if (status != IXGBE_SUCCESS) {
2381 DEBUGOUT1("Failed to stop adapter, STATUS = %d\n", status);
2382 return status;
2383 }
2384 /* flush pending Tx transactions */
2385 ixgbe_clear_tx_pending(hw);
2386
2387 ixgbe_set_mdio_speed(hw);
2388
2389 /* PHY ops must be identified and initialized prior to reset */
2390 status = hw->phy.ops.init(hw);
2391
2392 if (status)
2393 DEBUGOUT1("Failed to initialize PHY ops, STATUS = %d\n",
2394 status);
2395
2396 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED ||
2397 status == IXGBE_ERR_PHY_ADDR_INVALID) {
2398 DEBUGOUT("Returning from reset HW due to PHY init failure\n");
2399 return status;
2400 }
2401
2402 /* start the external PHY */
2403 if (hw->phy.type == ixgbe_phy_x550em_ext_t) {
2404 status = ixgbe_init_ext_t_x550em(hw);
2405 if (status) {
2406 DEBUGOUT1("Failed to start the external PHY, STATUS = %d\n",
2407 status);
2408 return status;
2409 }
2410 }
2411
2412 /* Setup SFP module if there is one present. */
2413 if (hw->phy.sfp_setup_needed) {
2414 status = hw->mac.ops.setup_sfp(hw);
2415 hw->phy.sfp_setup_needed = false;
2416 }
2417
2418 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
2419 return status;
2420
2421 /* Reset PHY */
2422 if (!hw->phy.reset_disable && hw->phy.ops.reset) {
2423 if (hw->phy.ops.reset(hw) == IXGBE_ERR_OVERTEMP)
2424 return IXGBE_ERR_OVERTEMP;
2425 }
2426
2427 mac_reset_top:
2428 /* Issue global reset to the MAC. Needs to be SW reset if link is up.
2429 * If link reset is used when link is up, it might reset the PHY when
2430 * mng is using it. If link is down or the flag to force full link
2431 * reset is set, then perform link reset.
2432 */
2433 ctrl = IXGBE_CTRL_LNK_RST;
2434 if (!hw->force_full_reset) {
2435 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
2436 if (link_up)
2437 ctrl = IXGBE_CTRL_RST;
2438 }
2439
2440 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
2441 if (status != IXGBE_SUCCESS) {
2442 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
2443 "semaphore failed with %d", status);
2444 return IXGBE_ERR_SWFW_SYNC;
2445 }
2446 ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
2447 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
2448 IXGBE_WRITE_FLUSH(hw);
2449 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
2450
2451 /* Poll for reset bit to self-clear meaning reset is complete */
2452 for (i = 0; i < 10; i++) {
2453 usec_delay(1);
2454 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
2455 if (!(ctrl & IXGBE_CTRL_RST_MASK))
2456 break;
2457 }
2458
2459 if (ctrl & IXGBE_CTRL_RST_MASK) {
2460 status = IXGBE_ERR_RESET_FAILED;
2461 DEBUGOUT("Reset polling failed to complete.\n");
2462 }
2463
2464 msec_delay(50);
2465
2466 /* Double resets are required for recovery from certain error
2467 * conditions. Between resets, it is necessary to stall to
2468 * allow time for any pending HW events to complete.
2469 */
2470 if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
2471 hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
2472 goto mac_reset_top;
2473 }
2474
2475 /* Store the permanent mac address */
2476 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
2477
2478 /* Store MAC address from RAR0, clear receive address registers, and
2479 * clear the multicast table. Also reset num_rar_entries to 128,
2480 * since we modify this value when programming the SAN MAC address.
2481 */
2482 hw->mac.num_rar_entries = 128;
2483 hw->mac.ops.init_rx_addrs(hw);
2484
2485 ixgbe_set_mdio_speed(hw);
2486
2487 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
2488 ixgbe_setup_mux_ctl(hw);
2489
2490 if (status != IXGBE_SUCCESS)
2491 DEBUGOUT1("Reset HW failed, STATUS = %d\n", status);
2492
2493 return status;
2494 }
2495
2496 /**
2497 * ixgbe_init_ext_t_x550em - Start (unstall) the external Base T PHY.
2498 * @hw: pointer to hardware structure
2499 */
2500 s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
2501 {
2502 u32 status;
2503 u16 reg;
2504
2505 status = hw->phy.ops.read_reg(hw,
2506 IXGBE_MDIO_TX_VENDOR_ALARMS_3,
2507 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
2508 &reg);
2509
2510 if (status != IXGBE_SUCCESS)
2511 return status;
2512
2513 /* If PHY FW reset completed bit is set then this is the first
2514 * SW instance after a power on so the PHY FW must be un-stalled.
2515 */
2516 if (reg & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) {
2517 status = hw->phy.ops.read_reg(hw,
2518 IXGBE_MDIO_GLOBAL_RES_PR_10,
2519 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2520 &reg);
2521
2522 if (status != IXGBE_SUCCESS)
2523 return status;
2524
2525 reg &= ~IXGBE_MDIO_POWER_UP_STALL;
2526
2527 status = hw->phy.ops.write_reg(hw,
2528 IXGBE_MDIO_GLOBAL_RES_PR_10,
2529 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2530 reg);
2531
2532 if (status != IXGBE_SUCCESS)
2533 return status;
2534 }
2535
2536 return status;
2537 }
2538
2539 /**
2540 * ixgbe_setup_kr_x550em - Configure the KR PHY.
2541 * @hw: pointer to hardware structure
2542 **/
2543 s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
2544 {
2545 /* leave link alone for 2.5G */
2546 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL)
2547 return IXGBE_SUCCESS;
2548
2549 if (ixgbe_check_reset_blocked(hw))
2550 return 0;
2551
2552 return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised);
2553 }
2554
2555 /**
2556 * ixgbe_setup_mac_link_sfp_x550em - Set up the internal/external PHY for SFP
2557 * @hw: pointer to hardware structure
2558 * @speed: new link speed
2559 * @autoneg_wait_to_complete: unused
2560 *
2561 * Configure the external PHY and the integrated KR PHY for SFP support.
2562 **/
2563 s32 ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw,
2564 ixgbe_link_speed speed,
2565 bool autoneg_wait_to_complete)
2566 {
2567 s32 ret_val;
2568 u16 reg_slice, reg_val;
2569 bool setup_linear = false;
2570 UNREFERENCED_1PARAMETER(autoneg_wait_to_complete);
2571
2572 /* Check if SFP module is supported and linear */
2573 ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
2574
2575 /* If no SFP module is present, return success: there is no reason to
2576 * configure the CS4227, and an SFP-not-present error is not expected
2577 * in the setup MAC link flow.
2578 */
2579 if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT)
2580 return IXGBE_SUCCESS;
2581
2582 if (ret_val != IXGBE_SUCCESS)
2583 return ret_val;
2584
2585 /* Configure internal PHY for KR/KX. */
2586 ixgbe_setup_kr_speed_x550em(hw, speed);
2587
2588 /* Configure CS4227 LINE side to proper mode. */
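/* Each port owns a 0x1000-wide register slice in the CS4227 address
 * space; shifting the LAN ID by 12 selects this port's slice.
 */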
2589 reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB +
2590 (hw->bus.lan_id << 12);
2591 if (setup_linear)
2592 reg_val = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
2593 else
2594 reg_val = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
2595 ret_val = hw->link.ops.write_link(hw, hw->link.addr, reg_slice,
2596 reg_val);
2597 return ret_val;
2598 }
2599
2600 /**
2601 * ixgbe_setup_sfi_x550a - Configure the internal PHY for native SFI mode
2602 * @hw: pointer to hardware structure
2603 * @speed: the link speed to force
2604 *
2605 * Configures the integrated PHY for native SFI mode. Used to connect the
2606 * internal PHY directly to an SFP cage, without autonegotiation.
2607 **/
2608 static s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
2609 {
2610 struct ixgbe_mac_info *mac = &hw->mac;
2611 s32 status;
2612 u32 reg_val;
2613
2614 /* Disable all AN and force speed to 10G Serial. */
2615 status = mac->ops.read_iosf_sb_reg(hw,
2616 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2617 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2618 if (status != IXGBE_SUCCESS)
2619 return status;
2620
2621 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
2622 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
2623 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
2624 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
2625
2626 /* Select forced link speed for internal PHY. */
2627 switch (*speed) {
2628 case IXGBE_LINK_SPEED_10GB_FULL:
2629 reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_10G;
2630 break;
2631 case IXGBE_LINK_SPEED_1GB_FULL:
2632 reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G;
2633 break;
2634 default:
2635 /* Other link speeds are not supported by internal PHY. */
2636 return IXGBE_ERR_LINK_SETUP;
2637 }
2638
2639 status = mac->ops.write_iosf_sb_reg(hw,
2640 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2641 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2642
2643 /* Toggle port SW reset by AN reset. */
2644 status = ixgbe_restart_an_internal_phy_x550em(hw);
2645
2646 return status;
2647 }
2648
2649 /**
2650 * ixgbe_setup_mac_link_sfp_x550a - Setup internal PHY for SFP
2651 * @hw: pointer to hardware structure
2652 * @speed: new link speed
2653 * @autoneg_wait_to_complete: unused
2654 *
2655 * Configure the integrated PHY for SFP support.
2656 **/
2657 s32 ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw,
2658 ixgbe_link_speed speed,
2659 bool autoneg_wait_to_complete)
2660 {
2661 s32 ret_val;
2662 u16 reg_phy_ext;
2663 bool setup_linear = false;
2664 u32 reg_slice, reg_phy_int, slice_offset;
2665
2666 UNREFERENCED_1PARAMETER(autoneg_wait_to_complete);
2667
2668 /* Check if SFP module is supported and linear */
2669 ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
2670
2671 /* If no SFP module is present, return success: an SFP-not-present
2672 * error is not expected in the setup MAC link flow.
2673 */
2674 if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT)
2675 return IXGBE_SUCCESS;
2676
2677 if (ret_val != IXGBE_SUCCESS)
2678 return ret_val;
2679
2680 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) {
2681 /* Configure internal PHY for native SFI based on module type */
2682 ret_val = hw->mac.ops.read_iosf_sb_reg(hw,
2683 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2684 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_phy_int);
2685
2686 if (ret_val != IXGBE_SUCCESS)
2687 return ret_val;
2688
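/* Direct attach (linear) modules are left in 10G DA mode; optical
 * modules are switched to 10G SR mode instead.
 */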
2689 reg_phy_int &= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA;
2690 if (!setup_linear)
2691 reg_phy_int |= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR;
2692
2693 ret_val = hw->mac.ops.write_iosf_sb_reg(hw,
2694 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2695 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_phy_int);
2696
2697 if (ret_val != IXGBE_SUCCESS)
2698 return ret_val;
2699
2700 /* Setup SFI internal link. */
2701 ret_val = ixgbe_setup_sfi_x550a(hw, &speed);
2702 } else {
2703 /* Configure internal PHY for KR/KX. */
2704 ixgbe_setup_kr_speed_x550em(hw, speed);
2705
2706 if (hw->phy.addr == 0x0 || hw->phy.addr == 0xFFFF) {
2707 /* Find Address */
2708 DEBUGOUT("Invalid NW_MNG_IF_SEL.MDIO_PHY_ADD value\n");
2709 return IXGBE_ERR_PHY_ADDR_INVALID;
2710 }
2711
2712 /* Get external PHY SKU id */
2713 ret_val = hw->phy.ops.read_reg(hw, IXGBE_CS4227_EFUSE_PDF_SKU,
2714 IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
2715
2716 if (ret_val != IXGBE_SUCCESS)
2717 return ret_val;
2718
2719 /* When configuring quad port CS4223, the MAC instance is part
2720 * of the slice offset.
2721 */
2722 if (reg_phy_ext == IXGBE_CS4223_SKU_ID)
2723 slice_offset = (hw->bus.lan_id +
2724 (hw->bus.instance_id << 1)) << 12;
2725 else
2726 slice_offset = hw->bus.lan_id << 12;
2727
2728 /* Configure CS4227/CS4223 LINE side to proper mode. */
2729 reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + slice_offset;
2730
2731 ret_val = hw->phy.ops.read_reg(hw, reg_slice,
2732 IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
2733
2734 if (ret_val != IXGBE_SUCCESS)
2735 return ret_val;
2736
2737 reg_phy_ext &= ~((IXGBE_CS4227_EDC_MODE_CX1 << 1) |
2738 (IXGBE_CS4227_EDC_MODE_SR << 1));
2739
2740 if (setup_linear)
2741 reg_phy_ext |= (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
2742 else
2743 reg_phy_ext |= (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
2744 ret_val = hw->phy.ops.write_reg(hw, reg_slice,
2745 IXGBE_MDIO_ZERO_DEV_TYPE, reg_phy_ext);
2746
2747 /* Flush previous write with a read */
2748 ret_val = hw->phy.ops.read_reg(hw, reg_slice,
2749 IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
2750 }
2751 return ret_val;
2752 }
2753
2754 /**
2755 * ixgbe_setup_ixfi_x550em_x - MAC specific iXFI configuration
2756 * @hw: pointer to hardware structure
2757 *
2758 * iXFI configuration needed for ixgbe_mac_X550EM_x devices.
2759 **/
2760 static s32 ixgbe_setup_ixfi_x550em_x(struct ixgbe_hw *hw)
2761 {
2762 struct ixgbe_mac_info *mac = &hw->mac;
2763 s32 status;
2764 u32 reg_val;
2765
2766 /* Disable training protocol FSM. */
2767 status = mac->ops.read_iosf_sb_reg(hw,
2768 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
2769 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2770 if (status != IXGBE_SUCCESS)
2771 return status;
2772 reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL;
2773 status = mac->ops.write_iosf_sb_reg(hw,
2774 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
2775 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2776 if (status != IXGBE_SUCCESS)
2777 return status;
2778
2779 /* Disable Flex from training TXFFE. */
2780 status = mac->ops.read_iosf_sb_reg(hw,
2781 IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
2782 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2783 if (status != IXGBE_SUCCESS)
2784 return status;
2785 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
2786 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
2787 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
2788 status = mac->ops.write_iosf_sb_reg(hw,
2789 IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
2790 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2791 if (status != IXGBE_SUCCESS)
2792 return status;
2793 status = mac->ops.read_iosf_sb_reg(hw,
2794 IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
2795 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2796 if (status != IXGBE_SUCCESS)
2797 return status;
2798 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
2799 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
2800 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
2801 status = mac->ops.write_iosf_sb_reg(hw,
2802 IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
2803 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2804 if (status != IXGBE_SUCCESS)
2805 return status;
2806
2807 /* Enable override for coefficients. */
2808 status = mac->ops.read_iosf_sb_reg(hw,
2809 IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
2810 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2811 if (status != IXGBE_SUCCESS)
2812 return status;
2813 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN;
2814 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN;
2815 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN;
2816 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN;
2817 status = mac->ops.write_iosf_sb_reg(hw,
2818 IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
2819 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2820 return status;
2821 }
2822
2823 /**
2824 * ixgbe_setup_ixfi_x550em - Configure the KR PHY for iXFI mode.
2825 * @hw: pointer to hardware structure
2826 * @speed: the link speed to force
2827 *
2828 * Configures the integrated KR PHY to use iXFI mode. Used to connect an
2829 * internal and external PHY at a specific speed, without autonegotiation.
2830 **/
2831 static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
2832 {
2833 struct ixgbe_mac_info *mac = &hw->mac;
2834 s32 status;
2835 u32 reg_val;
2836
2837 /* iXFI is only supported with X552 */
2838 if (mac->type != ixgbe_mac_X550EM_x)
2839 return IXGBE_ERR_LINK_SETUP;
2840
2841 /* Disable AN and force speed to 10G Serial. */
2842 status = mac->ops.read_iosf_sb_reg(hw,
2843 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
2844 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2845 if (status != IXGBE_SUCCESS)
2846 return status;
2847
2848 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
2849 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
2850
2851 /* Select forced link speed for internal PHY. */
2852 switch (*speed) {
2853 case IXGBE_LINK_SPEED_10GB_FULL:
2854 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;
2855 break;
2856 case IXGBE_LINK_SPEED_1GB_FULL:
2857 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
2858 break;
2859 default:
2860 /* Other link speeds are not supported by internal KR PHY. */
2861 return IXGBE_ERR_LINK_SETUP;
2862 }
2863
2864 status = mac->ops.write_iosf_sb_reg(hw,
2865 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
2866 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2867 if (status != IXGBE_SUCCESS)
2868 return status;
2869
2870 /* Additional configuration needed for x550em_x */
2871 if (hw->mac.type == ixgbe_mac_X550EM_x) {
2872 status = ixgbe_setup_ixfi_x550em_x(hw);
2873 if (status != IXGBE_SUCCESS)
2874 return status;
2875 }
2876
2877 /* Toggle port SW reset by AN reset. */
2878 status = ixgbe_restart_an_internal_phy_x550em(hw);
2879
2880 return status;
2881 }
2882
2883 /**
2884 * ixgbe_ext_phy_t_x550em_get_link - Get ext phy link status
2885 * @hw: address of hardware structure
2886 * @link_up: address of boolean to indicate link status
2887 *
2888 * Returns error code if unable to get link status.
2889 */
2890 static s32 ixgbe_ext_phy_t_x550em_get_link(struct ixgbe_hw *hw, bool *link_up)
2891 {
2892 u32 ret;
2893 u16 autoneg_status;
2894
2895 *link_up = false;
2896
2897 /* read this twice back to back to indicate current status */
2898 ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
2899 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
2900 &autoneg_status);
2901 if (ret != IXGBE_SUCCESS)
2902 return ret;
2903
2904 ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
2905 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
2906 &autoneg_status);
2907 if (ret != IXGBE_SUCCESS)
2908 return ret;
2909
2910 *link_up = !!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS);
2911
2912 return IXGBE_SUCCESS;
2913 }
2914
2915 /**
2916 * ixgbe_setup_internal_phy_t_x550em - Configure KR PHY to X557 link
2917 * @hw: pointer to hardware structure
2918 *
2919 * Configures the link between the integrated KR PHY and the external X557 PHY.
2920 * The driver will call this function when it gets a link status change
2921 * interrupt from the X557 PHY. This function configures the link speed
2922 * between the PHYs to match the link speed of the BASE-T link.
2923 *
2924 * A return of a non-zero value indicates an error, and the base driver should
2925 * not report link up.
2926 */
2927 s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw)
2928 {
2929 ixgbe_link_speed force_speed;
2930 bool link_up;
2931 u32 status;
2932 u16 speed;
2933
2934 if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
2935 return IXGBE_ERR_CONFIG;
2936
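/* X552 ports not running in internal PHY mode use an iXFI link to the
 * external X557, so the internal PHY must be forced to the speed the
 * external PHY negotiated; all other parts simply run KR
 * auto-negotiation (else branch below).
 */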
2937 if (hw->mac.type == ixgbe_mac_X550EM_x &&
2938 !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
2939 /* If link is down, there is no setup necessary so return */
2940 status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
2941 if (status != IXGBE_SUCCESS)
2942 return status;
2943
2944 if (!link_up)
2945 return IXGBE_SUCCESS;
2946
2947 status = hw->phy.ops.read_reg(hw,
2948 IXGBE_MDIO_AUTO_NEG_VENDOR_STAT,
2949 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
2950 &speed);
2951 if (status != IXGBE_SUCCESS)
2952 return status;
2953
2954 /* If link is still down - no setup is required so return */
2955 status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
2956 if (status != IXGBE_SUCCESS)
2957 return status;
2958 if (!link_up)
2959 return IXGBE_SUCCESS;
2960
2961 /* clear everything but the speed and duplex bits */
2962 speed &= IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK;
2963
2964 switch (speed) {
2965 case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL:
2966 force_speed = IXGBE_LINK_SPEED_10GB_FULL;
2967 break;
2968 case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL:
2969 force_speed = IXGBE_LINK_SPEED_1GB_FULL;
2970 break;
2971 default:
2972 /* Internal PHY does not support anything else */
2973 return IXGBE_ERR_INVALID_LINK_SETTINGS;
2974 }
2975
2976 return ixgbe_setup_ixfi_x550em(hw, &force_speed);
2977 } else {
2978 speed = IXGBE_LINK_SPEED_10GB_FULL |
2979 IXGBE_LINK_SPEED_1GB_FULL;
2980 return ixgbe_setup_kr_speed_x550em(hw, speed);
2981 }
2982 }
2983
2984 /**
2985 * ixgbe_setup_phy_loopback_x550em - Configure the KR PHY for loopback.
2986 * @hw: pointer to hardware structure
2987 *
2988 * Configures the integrated KR PHY to use internal loopback mode.
2989 **/
2990 s32 ixgbe_setup_phy_loopback_x550em(struct ixgbe_hw *hw)
2991 {
2992 s32 status;
2993 u32 reg_val;
2994
2995 /* Disable AN and force speed to 10G Serial. */
2996 status = hw->mac.ops.read_iosf_sb_reg(hw,
2997 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
2998 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2999 if (status != IXGBE_SUCCESS)
3000 return status;
3001 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
3002 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
3003 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;
3004 status = hw->mac.ops.write_iosf_sb_reg(hw,
3005 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
3006 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3007 if (status != IXGBE_SUCCESS)
3008 return status;
3009
3010 /* Set near-end loopback clocks. */
3011 status = hw->mac.ops.read_iosf_sb_reg(hw,
3012 IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id),
3013 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3014 if (status != IXGBE_SUCCESS)
3015 return status;
3016 reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B;
3017 reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS;
3018 status = hw->mac.ops.write_iosf_sb_reg(hw,
3019 IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id),
3020 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3021 if (status != IXGBE_SUCCESS)
3022 return status;
3023
3024 /* Set loopback enable. */
3025 status = hw->mac.ops.read_iosf_sb_reg(hw,
3026 IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id),
3027 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3028 if (status != IXGBE_SUCCESS)
3029 return status;
3030 reg_val |= IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK;
3031 status = hw->mac.ops.write_iosf_sb_reg(hw,
3032 IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id),
3033 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3034 if (status != IXGBE_SUCCESS)
3035 return status;
3036
3037 /* Training bypass. */
3038 status = hw->mac.ops.read_iosf_sb_reg(hw,
3039 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
3040 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3041 if (status != IXGBE_SUCCESS)
3042 return status;
3043 reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS;
3044 status = hw->mac.ops.write_iosf_sb_reg(hw,
3045 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
3046 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3047
3048 return status;
3049 }
3050
3051 /**
3052 * ixgbe_read_ee_hostif_X550 - Read EEPROM word using a host interface command
3053 * (the required SW/FW semaphores are acquired and released internally).
3054 * @hw: pointer to hardware structure
3055 * @offset: offset of word in the EEPROM to read
3056 * @data: word read from the EEPROM
3057 *
3058 * Reads a 16 bit word from the EEPROM using the hostif.
3059 **/
3060 s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
3061 {
3062 const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM;
3063 struct ixgbe_hic_read_shadow_ram buffer;
3064 s32 status;
3065
3066 DEBUGFUNC("ixgbe_read_ee_hostif_X550");
3067 buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
3068 buffer.hdr.req.buf_lenh = 0;
3069 buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
3070 buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
3071
3072 /* convert offset from words to bytes */
3073 buffer.address = IXGBE_CPU_TO_BE32(offset * 2);
3074 /* one word */
3075 buffer.length = IXGBE_CPU_TO_BE16(sizeof(u16));
3076 buffer.pad2 = 0;
3077 buffer.data = 0;
3078 buffer.pad3 = 0;
3079
3080 status = hw->mac.ops.acquire_swfw_sync(hw, mask);
3081 if (status)
3082 return status;
3083
3084 status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer),
3085 IXGBE_HI_COMMAND_TIMEOUT);
3086 if (!status) {
3087 *data = (u16)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG,
3088 FW_NVM_DATA_OFFSET);
3089 }
3090
3091 hw->mac.ops.release_swfw_sync(hw, mask);
3092 return status;
3093 }
3094
3095 /**
3096 * ixgbe_read_ee_hostif_buffer_X550- Read EEPROM word(s) using hostif
3097 * @hw: pointer to hardware structure
3098 * @offset: offset of word in the EEPROM to read
3099 * @words: number of words
3100 * @data: word(s) read from the EEPROM
3101 *
3102 * Reads 16 bit word(s) from the EEPROM using the hostif.
3103 **/
3104 s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
3105 u16 offset, u16 words, u16 *data)
3106 {
3107 const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM;
3108 struct ixgbe_hic_read_shadow_ram buffer;
3109 u32 current_word = 0;
3110 u16 words_to_read;
3111 s32 status;
3112 u32 i;
3113
3114 DEBUGFUNC("ixgbe_read_ee_hostif_buffer_X550");
3115
3116 /* Take semaphore for the entire operation. */
3117 status = hw->mac.ops.acquire_swfw_sync(hw, mask);
3118 if (status) {
3119 DEBUGOUT("EEPROM read buffer - semaphore failed\n");
3120 return status;
3121 }
3122
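/* The host interface data buffer holds at most FW_MAX_READ_BUFFER_SIZE
 * bytes, so split the request into chunks of at most half that many
 * 16-bit words per command.
 */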
3123 while (words) {
3124 if (words > FW_MAX_READ_BUFFER_SIZE / 2)
3125 words_to_read = FW_MAX_READ_BUFFER_SIZE / 2;
3126 else
3127 words_to_read = words;
3128
3129 buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
3130 buffer.hdr.req.buf_lenh = 0;
3131 buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
3132 buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
3133
3134 /* convert offset from words to bytes */
3135 buffer.address = IXGBE_CPU_TO_BE32((offset + current_word) * 2);
3136 buffer.length = IXGBE_CPU_TO_BE16(words_to_read * 2);
3137 buffer.pad2 = 0;
3138 buffer.data = 0;
3139 buffer.pad3 = 0;
3140
3141 status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer),
3142 IXGBE_HI_COMMAND_TIMEOUT);
3143
3144 if (status) {
3145 DEBUGOUT("Host interface command failed\n");
3146 goto out;
3147 }
3148
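/* Each 32-bit FLEX_MNG read returns two 16-bit EEPROM words: the low
 * half first, then the high half.
 */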
3149 for (i = 0; i < words_to_read; i++) {
3150 u32 reg = IXGBE_FLEX_MNG + (FW_NVM_DATA_OFFSET << 2) +
3151 2 * i;
3152 u32 value = IXGBE_READ_REG(hw, reg);
3153
3154 data[current_word] = (u16)(value & 0xffff);
3155 current_word++;
3156 i++;
3157 if (i < words_to_read) {
3158 value >>= 16;
3159 data[current_word] = (u16)(value & 0xffff);
3160 current_word++;
3161 }
3162 }
3163 words -= words_to_read;
3164 }
3165
3166 out:
3167 hw->mac.ops.release_swfw_sync(hw, mask);
3168 return status;
3169 }
3170
3171 /**
3172 * ixgbe_write_ee_hostif_data_X550 - Write EEPROM word using hostif
3173 * @hw: pointer to hardware structure
3174 * @offset: offset of word in the EEPROM to write
3175 * @data: word to write to the EEPROM
3176 *
3177 * Write a 16 bit word to the EEPROM using the hostif.
3178 **/
3179 s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
3180 u16 data)
3181 {
3182 s32 status;
3183 struct ixgbe_hic_write_shadow_ram buffer;
3184
3185 DEBUGFUNC("ixgbe_write_ee_hostif_data_X550");
3186
3187 buffer.hdr.req.cmd = FW_WRITE_SHADOW_RAM_CMD;
3188 buffer.hdr.req.buf_lenh = 0;
3189 buffer.hdr.req.buf_lenl = FW_WRITE_SHADOW_RAM_LEN;
3190 buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
3191
3192 /* one word */
3193 buffer.length = IXGBE_CPU_TO_BE16(sizeof(u16));
3194 buffer.data = data;
3195 buffer.address = IXGBE_CPU_TO_BE32(offset * 2);
3196
3197 status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
3198 sizeof(buffer),
3199 IXGBE_HI_COMMAND_TIMEOUT, true);
3200 if (status != IXGBE_SUCCESS) {
3201 DEBUGOUT2("for offset %04x failed with status %d\n",
3202 offset, status);
3203 return status;
3204 }
3205
3206 if (buffer.hdr.rsp.buf_lenh_status != FW_CEM_RESP_STATUS_SUCCESS) {
3207 DEBUGOUT2("for offset %04x host interface return status %02x\n",
3208 offset, buffer.hdr.rsp.buf_lenh_status);
3209 return IXGBE_ERR_HOST_INTERFACE_COMMAND;
3210 }
3211
3212 return status;
3213 }
3214
3215 /**
3216 * ixgbe_write_ee_hostif_X550 - Write EEPROM word using hostif
3217 * @hw: pointer to hardware structure
3218 * @offset: offset of word in the EEPROM to write
3219 * @data: word to write to the EEPROM
3220 *
3221 * Write a 16 bit word to the EEPROM using the hostif.
3222 **/
3223 s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset,
3224 u16 data)
3225 {
3226 s32 status = IXGBE_SUCCESS;
3227
3228 DEBUGFUNC("ixgbe_write_ee_hostif_X550");
3229
3230 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
3231 IXGBE_SUCCESS) {
3232 status = ixgbe_write_ee_hostif_data_X550(hw, offset, data);
3233 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
3234 } else {
3235 DEBUGOUT("write ee hostif failed to get semaphore");
3236 status = IXGBE_ERR_SWFW_SYNC;
3237 }
3238
3239 return status;
3240 }
3241
3242 /**
3243 * ixgbe_write_ee_hostif_buffer_X550 - Write EEPROM word(s) using hostif
3244 * @hw: pointer to hardware structure
3245 * @offset: offset of word in the EEPROM to write
3246 * @words: number of words
3247 * @data: word(s) to write to the EEPROM
3248 *
3249 * Writes 16 bit word(s) to the EEPROM using the hostif.
3250 **/
3251 s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
3252 u16 offset, u16 words, u16 *data)
3253 {
3254 s32 status = IXGBE_SUCCESS;
3255 u32 i = 0;
3256
3257 DEBUGFUNC("ixgbe_write_ee_hostif_buffer_X550");
3258
3259 /* Take semaphore for the entire operation. */
3260 status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
3261 if (status != IXGBE_SUCCESS) {
3262 DEBUGOUT("EEPROM write buffer - semaphore failed\n");
3263 goto out;
3264 }
3265
3266 for (i = 0; i < words; i++) {
3267 status = ixgbe_write_ee_hostif_data_X550(hw, offset + i,
3268 data[i]);
3269
3270 if (status != IXGBE_SUCCESS) {
3271 DEBUGOUT("Eeprom buffered write failed\n");
3272 break;
3273 }
3274 }
3275
3276 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
3277 out:
3278
3279 return status;
3280 }
3281
3282 /**
3283 * ixgbe_checksum_ptr_x550 - Checksum one pointer region
3284 * @hw: pointer to hardware structure
3285 * @ptr: pointer offset in eeprom
3286 * @size: size of section pointed by ptr, if 0 first word will be used as size
3287 * @csum: address of checksum to update
3288 * @buffer: pointer to buffer containing calculated checksum
3289 * @buffer_size: size of buffer
3290 *
3291 * Returns error status for any failure
3292 */
3293 static s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr,
3294 u16 size, u16 *csum, u16 *buffer,
3295 u32 buffer_size)
3296 {
3297 u16 buf[256];
3298 s32 status;
3299 u16 length, bufsz, i, start;
3300 u16 *local_buffer;
3301
3302 bufsz = sizeof(buf) / sizeof(buf[0]);
3303
3304 /* Read a chunk at the pointer location */
3305 if (!buffer) {
3306 status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr, bufsz, buf);
3307 if (status) {
3308 DEBUGOUT("Failed to read EEPROM image\n");
3309 return status;
3310 }
3311 local_buffer = buf;
3312 } else {
3313 if (buffer_size < ptr)
3314 return IXGBE_ERR_PARAM;
3315 local_buffer = &buffer[ptr];
3316 }
3317
3318 if (size) {
3319 start = 0;
3320 length = size;
3321 } else {
3322 start = 1;
3323 length = local_buffer[0];
3324
3325 /* Skip pointer section if length is invalid. */
3326 if (length == 0xFFFF || length == 0 ||
3327 (ptr + length) >= hw->eeprom.word_size)
3328 return IXGBE_SUCCESS;
3329 }
3330
3331 if (buffer && ((u32)start + (u32)length > buffer_size))
3332 return IXGBE_ERR_PARAM;
3333
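/* Without a caller-supplied buffer, the region is summed in bufsz-word
 * chunks: when the index reaches the end of the local buffer, the next
 * chunk is read from the EEPROM and the index wraps to zero.
 */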
3334 for (i = start; length; i++, length--) {
3335 if (i == bufsz && !buffer) {
3336 ptr += bufsz;
3337 i = 0;
3338 if (length < bufsz)
3339 bufsz = length;
3340
3341 /* Read a chunk at the pointer location */
3342 status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr,
3343 bufsz, buf);
3344 if (status) {
3345 DEBUGOUT("Failed to read EEPROM image\n");
3346 return status;
3347 }
3348 }
3349 *csum += local_buffer[i];
3350 }
3351 return IXGBE_SUCCESS;
3352 }
3353
3354 /**
3355 * ixgbe_calc_checksum_X550 - Calculates and returns the checksum
3356 * @hw: pointer to hardware structure
3357 * @buffer: pointer to buffer containing calculated checksum
3358 * @buffer_size: size of buffer
3359 *
3360 * Returns a negative error code on error, or the 16-bit checksum
3361 **/
3362 s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, u32 buffer_size)
3363 {
3364 u16 eeprom_ptrs[IXGBE_EEPROM_LAST_WORD + 1];
3365 u16 *local_buffer;
3366 s32 status;
3367 u16 checksum = 0;
3368 u16 pointer, i, size;
3369
3370 DEBUGFUNC("ixgbe_calc_eeprom_checksum_X550");
3371
3372 hw->eeprom.ops.init_params(hw);
3373
3374 if (!buffer) {
3375 /* Read pointer area */
3376 status = ixgbe_read_ee_hostif_buffer_X550(hw, 0,
3377 IXGBE_EEPROM_LAST_WORD + 1,
3378 eeprom_ptrs);
3379 if (status) {
3380 DEBUGOUT("Failed to read EEPROM image\n");
3381 return status;
3382 }
3383 local_buffer = eeprom_ptrs;
3384 } else {
3385 if (buffer_size < IXGBE_EEPROM_LAST_WORD)
3386 return IXGBE_ERR_PARAM;
3387 local_buffer = buffer;
3388 }
3389
3390 /*
3391 * For X550 hardware include 0x0-0x41 in the checksum, skip the
3392 * checksum word itself
3393 */
3394 for (i = 0; i <= IXGBE_EEPROM_LAST_WORD; i++)
3395 if (i != IXGBE_EEPROM_CHECKSUM)
3396 checksum += local_buffer[i];
3397
3398 /*
3399 * Include all data from pointers 0x3, 0x6-0xE. This excludes the
3400 * FW, PHY module, and PCIe Expansion/Option ROM pointers.
3401 */
3402 for (i = IXGBE_PCIE_ANALOG_PTR_X550; i < IXGBE_FW_PTR; i++) {
3403 if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR)
3404 continue;
3405
3406 pointer = local_buffer[i];
3407
3408 /* Skip pointer section if the pointer is invalid. */
3409 if (pointer == 0xFFFF || pointer == 0 ||
3410 pointer >= hw->eeprom.word_size)
3411 continue;
3412
3413 switch (i) {
3414 case IXGBE_PCIE_GENERAL_PTR:
3415 size = IXGBE_IXGBE_PCIE_GENERAL_SIZE;
3416 break;
3417 case IXGBE_PCIE_CONFIG0_PTR:
3418 case IXGBE_PCIE_CONFIG1_PTR:
3419 size = IXGBE_PCIE_CONFIG_SIZE;
3420 break;
3421 default:
3422 size = 0;
3423 break;
3424 }
3425
3426 status = ixgbe_checksum_ptr_x550(hw, pointer, size, &checksum,
3427 buffer, buffer_size);
3428 if (status)
3429 return status;
3430 }
3431
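/* The stored checksum is the value that makes all checksummed words
 * sum to IXGBE_EEPROM_SUM.
 */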
3432 checksum = (u16)IXGBE_EEPROM_SUM - checksum;
3433
3434 return (s32)checksum;
3435 }
3436
3437 /**
3438 * ixgbe_calc_eeprom_checksum_X550 - Calculates and returns the checksum
3439 * @hw: pointer to hardware structure
3440 *
3441 * Returns a negative error code on error, or the 16-bit checksum
3442 **/
3443 s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw)
3444 {
3445 return ixgbe_calc_checksum_X550(hw, NULL, 0);
3446 }
3447
3448 /**
3449 * ixgbe_validate_eeprom_checksum_X550 - Validate EEPROM checksum
3450 * @hw: pointer to hardware structure
3451 * @checksum_val: calculated checksum
3452 *
3453 * Performs checksum calculation and validates the EEPROM checksum. If the
3454 * caller does not need checksum_val, the value can be NULL.
3455 **/
3456 s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, u16 *checksum_val)
3457 {
3458 s32 status;
3459 u16 checksum;
3460 u16 read_checksum = 0;
3461
3462 DEBUGFUNC("ixgbe_validate_eeprom_checksum_X550");
3463
3464 /* Read the first word from the EEPROM. If this times out or fails, do
3465 * not continue or we could be in for a very long wait while every
3466 * EEPROM read fails
3467 */
3468 status = hw->eeprom.ops.read(hw, 0, &checksum);
3469 if (status) {
3470 DEBUGOUT("EEPROM read failed\n");
3471 return status;
3472 }
3473
3474 status = hw->eeprom.ops.calc_checksum(hw);
3475 if (status < 0)
3476 return status;
3477
3478 checksum = (u16)(status & 0xffff);
3479
3480 status = ixgbe_read_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM,
3481 &read_checksum);
3482 if (status)
3483 return status;
3484
3485 /* Verify read checksum from EEPROM is the same as
3486 * calculated checksum
3487 */
3488 if (read_checksum != checksum) {
3489 status = IXGBE_ERR_EEPROM_CHECKSUM;
3490 ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
3491 "Invalid EEPROM checksum");
3492 }
3493
3494 /* If the user cares, return the calculated checksum */
3495 if (checksum_val)
3496 *checksum_val = checksum;
3497
3498 return status;
3499 }
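
/*
 * Usage sketch (illustrative only, not called by the driver): validating the
 * NVM image and reacting to a mismatch.
 *
 *	u16 checksum_val;
 *	s32 ret;
 *
 *	ret = ixgbe_validate_eeprom_checksum_X550(hw, &checksum_val);
 *	if (ret == IXGBE_ERR_EEPROM_CHECKSUM)
 *		DEBUGOUT("NVM checksum mismatch\n");
 *	else if (ret != IXGBE_SUCCESS)
 *		DEBUGOUT("NVM access failed\n");
 */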
3500
3501 /**
3502 * ixgbe_update_eeprom_checksum_X550 - Updates the EEPROM checksum and flash
3503 * @hw: pointer to hardware structure
3504 *
3505 * After writing EEPROM words to shadow RAM using the EEWR register, software
3506 * recalculates the checksum, updates it in the EEPROM, and instructs the
3507 * hardware to update the flash.
3508 **/
3509 s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw)
3510 {
3511 s32 status;
3512 u16 checksum = 0;
3513
3514 DEBUGFUNC("ixgbe_update_eeprom_checksum_X550");
3515
3516 /* Read the first word from the EEPROM. If this times out or fails, do
3517 * not continue or we could be in for a very long wait while every
3518 * EEPROM read fails
3519 */
3520 status = ixgbe_read_ee_hostif_X550(hw, 0, &checksum);
3521 if (status) {
3522 DEBUGOUT("EEPROM read failed\n");
3523 return status;
3524 }
3525
3526 status = ixgbe_calc_eeprom_checksum_X550(hw);
3527 if (status < 0)
3528 return status;
3529
3530 checksum = (u16)(status & 0xffff);
3531
3532 status = ixgbe_write_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM,
3533 checksum);
3534 if (status)
3535 return status;
3536
3537 status = ixgbe_update_flash_X550(hw);
3538
3539 return status;
3540 }
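
/*
 * Usage sketch (illustrative only; the offset and value are hypothetical):
 * after changing a shadow RAM word through the host interface write path,
 * the checksum and the flash copy must be refreshed.
 *
 *	u16 new_word = 0x1234;
 *	s32 ret;
 *
 *	ret = ixgbe_write_ee_hostif_X550(hw, 0x10, new_word);
 *	if (ret == IXGBE_SUCCESS)
 *		ret = ixgbe_update_eeprom_checksum_X550(hw);
 */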
3541
3542 /**
3543 * ixgbe_update_flash_X550 - Instruct HW to copy EEPROM to Flash device
3544 * @hw: pointer to hardware structure
3545 *
3546 * Issue a shadow RAM dump to FW to copy EEPROM from shadow RAM to the flash.
3547 **/
3548 s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
3549 {
3550 s32 status = IXGBE_SUCCESS;
3551 union ixgbe_hic_hdr2 buffer;
3552
3553 DEBUGFUNC("ixgbe_update_flash_X550");
3554
3555 buffer.req.cmd = FW_SHADOW_RAM_DUMP_CMD;
3556 buffer.req.buf_lenh = 0;
3557 buffer.req.buf_lenl = FW_SHADOW_RAM_DUMP_LEN;
3558 buffer.req.checksum = FW_DEFAULT_CHECKSUM;
3559
3560 status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
3561 sizeof(buffer),
3562 IXGBE_HI_COMMAND_TIMEOUT, false);
3563
3564 return status;
3565 }
3566
3567 /**
3568 * ixgbe_get_supported_physical_layer_X550em - Returns physical layer type
3569 * @hw: pointer to hardware structure
3570 *
3571 * Determines physical layer capabilities of the current configuration.
3572 **/
3573 u64 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw)
3574 {
3575 u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
3576 u16 ext_ability = 0;
3577
3578 DEBUGFUNC("ixgbe_get_supported_physical_layer_X550em");
3579
3580 hw->phy.ops.identify(hw);
3581
3582 switch (hw->phy.type) {
3583 case ixgbe_phy_x550em_kr:
3584 if (hw->mac.type == ixgbe_mac_X550EM_a) {
3585 if (hw->phy.nw_mng_if_sel &
3586 IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G) {
3587 physical_layer =
3588 IXGBE_PHYSICAL_LAYER_2500BASE_KX;
3589 break;
3590 } else if (hw->device_id ==
3591 IXGBE_DEV_ID_X550EM_A_KR_L) {
3592 physical_layer =
3593 IXGBE_PHYSICAL_LAYER_1000BASE_KX;
3594 break;
3595 }
3596 }
3597 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR |
3598 IXGBE_PHYSICAL_LAYER_1000BASE_KX;
3599 break;
3600 case ixgbe_phy_x550em_xfi:
3601 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR |
3602 IXGBE_PHYSICAL_LAYER_1000BASE_KX;
3603 break;
3604 case ixgbe_phy_x550em_kx4:
3605 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
3606 IXGBE_PHYSICAL_LAYER_1000BASE_KX;
3607 break;
3608 case ixgbe_phy_x550em_ext_t:
3609 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
3610 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
3611 &ext_ability);
3612 if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
3613 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
3614 if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
3615 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
3616 break;
3617 case ixgbe_phy_fw:
3618 if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_1GB_FULL)
3619 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
3620 if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_100_FULL)
3621 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
3622 if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_10_FULL)
3623 physical_layer |= IXGBE_PHYSICAL_LAYER_10BASE_T;
3624 break;
3625 case ixgbe_phy_sgmii:
3626 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
3627 break;
3628 case ixgbe_phy_ext_1g_t:
3629 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
3630 break;
3631 default:
3632 break;
3633 }
3634
3635 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
3636 physical_layer = ixgbe_get_supported_phy_sfp_layer_generic(hw);
3637
3638 return physical_layer;
3639 }
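
/*
 * Usage sketch (illustrative only): the return value is a bit mask, so
 * callers test individual IXGBE_PHYSICAL_LAYER_* bits rather than comparing
 * for equality.
 *
 *	u64 layer = ixgbe_get_supported_physical_layer_X550em(hw);
 *
 *	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
 *		DEBUGOUT("10GBASE-T supported\n");
 *	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
 *		DEBUGOUT("1000BASE-KX supported\n");
 */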
3640
3641 /**
3642 * ixgbe_get_bus_info_X550em - Set PCI bus info
3643 * @hw: pointer to hardware structure
3644 *
3645 * Sets bus link width and speed to unknown because X550em is
3646 * not a PCI device.
3647 **/
3648 s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw)
3649 {
3650
3651 DEBUGFUNC("ixgbe_get_bus_info_x550em");
3652
3653 hw->bus.width = ixgbe_bus_width_unknown;
3654 hw->bus.speed = ixgbe_bus_speed_unknown;
3655
3656 hw->mac.ops.set_lan_id(hw);
3657
3658 return IXGBE_SUCCESS;
3659 }
3660
3661 /**
3662 * ixgbe_disable_rx_x550 - Disable RX unit
3663 * @hw: pointer to hardware structure
3664 *
3665 * Disables the Rx unit for x550
3666 **/
3667 void ixgbe_disable_rx_x550(struct ixgbe_hw *hw)
3668 {
3669 u32 rxctrl, pfdtxgswc;
3670 s32 status;
3671 struct ixgbe_hic_disable_rxen fw_cmd;
3672
3673 DEBUGFUNC("ixgbe_disable_rx_x550");
3674
3675 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3676 if (rxctrl & IXGBE_RXCTRL_RXEN) {
3677 pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
3678 if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
3679 pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
3680 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
3681 hw->mac.set_lben = true;
3682 } else {
3683 hw->mac.set_lben = false;
3684 }
3685
3686 fw_cmd.hdr.cmd = FW_DISABLE_RXEN_CMD;
3687 fw_cmd.hdr.buf_len = FW_DISABLE_RXEN_LEN;
3688 fw_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
3689 fw_cmd.port_number = (u8)hw->bus.lan_id;
3690
3691 status = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
3692 sizeof(struct ixgbe_hic_disable_rxen),
3693 IXGBE_HI_COMMAND_TIMEOUT, true);
3694
3695 /* If we fail - disable RX using register write */
3696 if (status) {
3697 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3698 if (rxctrl & IXGBE_RXCTRL_RXEN) {
3699 rxctrl &= ~IXGBE_RXCTRL_RXEN;
3700 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
3701 }
3702 }
3703 }
3704 }
3705
3706 /**
3707 * ixgbe_enter_lplu_t_x550em - Transition to low power states
3708 * @hw: pointer to hardware structure
3709 *
3710 * Configures Low Power Link Up on transition to low power states
3711 * (from D0 to non-D0). Link is required to enter LPLU so avoid resetting the
3712 * X557 PHY immediately prior to entering LPLU.
3713 **/
3714 s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw)
3715 {
3716 u16 an_10g_cntl_reg, autoneg_reg, speed;
3717 s32 status;
3718 ixgbe_link_speed lcd_speed;
3719 u32 save_autoneg;
3720 bool link_up;
3721
3722 /* SW LPLU not required on later HW revisions. */
3723 if ((hw->mac.type == ixgbe_mac_X550EM_x) &&
3724 (IXGBE_FUSES0_REV_MASK &
3725 IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0))))
3726 return IXGBE_SUCCESS;
3727
3728 /* If blocked by MNG FW, then don't restart AN */
3729 if (ixgbe_check_reset_blocked(hw))
3730 return IXGBE_SUCCESS;
3731
3732 status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
3733 if (status != IXGBE_SUCCESS)
3734 return status;
3735
3736 status = ixgbe_read_eeprom(hw, NVM_INIT_CTRL_3, &hw->eeprom.ctrl_word_3);
3737
3738 if (status != IXGBE_SUCCESS)
3739 return status;
3740
3741 /* If link is down, LPLU disabled in NVM, WoL disabled, or manageability
3742 * disabled, then force link down by entering low power mode.
3743 */
3744 if (!link_up || !(hw->eeprom.ctrl_word_3 & NVM_INIT_CTRL_3_LPLU) ||
3745 !(hw->wol_enabled || ixgbe_mng_present(hw)))
3746 return ixgbe_set_copper_phy_power(hw, false);
3747
3748 /* Determine LCD */
3749 status = ixgbe_get_lcd_t_x550em(hw, &lcd_speed);
3750
3751 if (status != IXGBE_SUCCESS)
3752 return status;
3753
3754 /* If no valid LCD link speed, then force link down and exit. */
3755 if (lcd_speed == IXGBE_LINK_SPEED_UNKNOWN)
3756 return ixgbe_set_copper_phy_power(hw, false);
3757
3758 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT,
3759 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3760 &speed);
3761
3762 if (status != IXGBE_SUCCESS)
3763 return status;
3764
3765 /* If no link now, speed is invalid so take link down */
3766 status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
3767 if (status != IXGBE_SUCCESS)
3768 return ixgbe_set_copper_phy_power(hw, false);
3769
3770 /* clear everything but the speed bits */
3771 speed &= IXGBE_MDIO_AUTO_NEG_VEN_STAT_SPEED_MASK;
3772
3773 /* If current speed is already LCD, then exit. */
3774 if (((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB) &&
3775 (lcd_speed == IXGBE_LINK_SPEED_1GB_FULL)) ||
3776 ((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB) &&
3777 (lcd_speed == IXGBE_LINK_SPEED_10GB_FULL)))
3778 return status;
3779
3780 /* Clear AN completed indication */
3781 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM,
3782 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3783 &autoneg_reg);
3784
3785 if (status != IXGBE_SUCCESS)
3786 return status;
3787
3788 status = hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
3789 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3790 &an_10g_cntl_reg);
3791
3792 if (status != IXGBE_SUCCESS)
3793 return status;
3794
3795 status = hw->phy.ops.read_reg(hw,
3796 IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
3797 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3798 &autoneg_reg);
3799
3800 if (status != IXGBE_SUCCESS)
3801 return status;
3802
3803 save_autoneg = hw->phy.autoneg_advertised;
3804
3805 /* Set up link at the lowest common link speed (LCD) */
3806 status = hw->mac.ops.setup_link(hw, lcd_speed, false);
3807
3808 /* restore autoneg from before setting lplu speed */
3809 hw->phy.autoneg_advertised = save_autoneg;
3810
3811 return status;
3812 }
3813
3814 /**
3815 * ixgbe_get_lcd_t_x550em - Determine lowest common denominator
3816 * @hw: pointer to hardware structure
3817 * @lcd_speed: pointer to lowest common link speed
3818 *
3819 * Determine lowest common link speed with link partner.
3820 **/
3821 s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *lcd_speed)
3822 {
3823 u16 an_lp_status;
3824 s32 status;
3825 u16 word = hw->eeprom.ctrl_word_3;
3826
3827 *lcd_speed = IXGBE_LINK_SPEED_UNKNOWN;
3828
3829 status = hw->phy.ops.read_reg(hw, IXGBE_AUTO_NEG_LP_STATUS,
3830 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3831 &an_lp_status);
3832
3833 if (status != IXGBE_SUCCESS)
3834 return status;
3835
3836 /* If link partner advertised 1G, return 1G */
3837 if (an_lp_status & IXGBE_AUTO_NEG_LP_1000BASE_CAP) {
3838 *lcd_speed = IXGBE_LINK_SPEED_1GB_FULL;
3839 return status;
3840 }
3841
3842 /* If 10G disabled for LPLU via NVM D10GMP, then return no valid LCD */
3843 if ((hw->bus.lan_id && (word & NVM_INIT_CTRL_3_D10GMP_PORT1)) ||
3844 (word & NVM_INIT_CTRL_3_D10GMP_PORT0))
3845 return status;
3846
3847 /* Link partner not capable of lower speeds, return 10G */
3848 *lcd_speed = IXGBE_LINK_SPEED_10GB_FULL;
3849 return status;
3850 }
3851
3852 /**
3853 * ixgbe_setup_fc_X550em - Set up flow control
3854 * @hw: pointer to hardware structure
3855 *
3856 * Called at init time to set up flow control.
3857 **/
3858 s32 ixgbe_setup_fc_X550em(struct ixgbe_hw *hw)
3859 {
3860 s32 ret_val = IXGBE_SUCCESS;
3861 u32 pause, asm_dir, reg_val;
3862
3863 DEBUGFUNC("ixgbe_setup_fc_X550em");
3864
3865 /* Validate the requested mode */
3866 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
3867 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
3868 "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
3869 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
3870 goto out;
3871 }
3872
3873 /* 10gig parts do not have a word in the EEPROM to determine the
3874 * default flow control setting, so we explicitly set it to full.
3875 */
3876 if (hw->fc.requested_mode == ixgbe_fc_default)
3877 hw->fc.requested_mode = ixgbe_fc_full;
3878
3879 /* Determine PAUSE and ASM_DIR bits. */
3880 switch (hw->fc.requested_mode) {
3881 case ixgbe_fc_none:
3882 pause = 0;
3883 asm_dir = 0;
3884 break;
3885 case ixgbe_fc_tx_pause:
3886 pause = 0;
3887 asm_dir = 1;
3888 break;
3889 case ixgbe_fc_rx_pause:
3890 /* Rx Flow control is enabled and Tx Flow control is
3891 * disabled by software override. Since there really
3892 * isn't a way to advertise that we are capable of RX
3893 * Pause ONLY, we will advertise that we support both
3894 * symmetric and asymmetric Rx PAUSE, as such we fall
3895 * through to the fc_full statement. Later, we will
3896 * disable the adapter's ability to send PAUSE frames.
3897 */
3898 case ixgbe_fc_full:
3899 pause = 1;
3900 asm_dir = 1;
3901 break;
3902 default:
3903 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
3904 "Flow control param set incorrectly\n");
3905 ret_val = IXGBE_ERR_CONFIG;
3906 goto out;
3907 }
3908
3909 switch (hw->device_id) {
3910 case IXGBE_DEV_ID_X550EM_X_KR:
3911 case IXGBE_DEV_ID_X550EM_A_KR:
3912 case IXGBE_DEV_ID_X550EM_A_KR_L:
3913 ret_val = hw->mac.ops.read_iosf_sb_reg(hw,
3914 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
3915 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3916 if (ret_val != IXGBE_SUCCESS)
3917 goto out;
3918 reg_val &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
3919 IXGBE_KRM_AN_CNTL_1_ASM_PAUSE);
3920 if (pause)
3921 reg_val |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE;
3922 if (asm_dir)
3923 reg_val |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
3924 ret_val = hw->mac.ops.write_iosf_sb_reg(hw,
3925 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
3926 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3927
3928 /* This device does not fully support AN. */
3929 hw->fc.disable_fc_autoneg = true;
3930 break;
3931 case IXGBE_DEV_ID_X550EM_X_XFI:
3932 hw->fc.disable_fc_autoneg = true;
3933 break;
3934 default:
3935 break;
3936 }
3937
3938 out:
3939 return ret_val;
3940 }
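
/*
 * Summary of the PAUSE/ASM_DIR advertisement chosen above (the usual
 * IEEE 802.3 Annex 28B encoding):
 *
 *	requested_mode		PAUSE	ASM_DIR
 *	ixgbe_fc_none		  0	   0	no pause advertised
 *	ixgbe_fc_tx_pause	  0	   1	asymmetric toward link partner
 *	ixgbe_fc_rx_pause	  1	   1	advertised as full; Tx pause is
 *						disabled later
 *	ixgbe_fc_full		  1	   1	symmetric and asymmetric
 */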
3941
3942 /**
3943 * ixgbe_fc_autoneg_backplane_x550em_a - Enable flow control IEEE clause 37
3944 * @hw: pointer to hardware structure
3945 *
3946 * Enable flow control according to IEEE clause 37.
3947 **/
3948 void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *hw)
3949 {
3950 u32 link_s1, lp_an_page_low, an_cntl_1;
3951 s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
3952 ixgbe_link_speed speed;
3953 bool link_up;
3954
3955 /* AN should have completed when the cable was plugged in.
3956 * Look for reasons to bail out. Bail out if:
3957 * - FC autoneg is disabled, or if
3958 * - link is not up.
3959 */
3960 if (hw->fc.disable_fc_autoneg) {
3961 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
3962 "Flow control autoneg is disabled");
3963 goto out;
3964 }
3965
3966 hw->mac.ops.check_link(hw, &speed, &link_up, false);
3967 if (!link_up) {
3968 ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
3969 goto out;
3970 }
3971
3972 /* Check if auto-negotiation has completed */
3973 status = hw->mac.ops.read_iosf_sb_reg(hw,
3974 IXGBE_KRM_LINK_S1(hw->bus.lan_id),
3975 IXGBE_SB_IOSF_TARGET_KR_PHY, &link_s1);
3976
3977 if (status != IXGBE_SUCCESS ||
3978 (link_s1 & IXGBE_KRM_LINK_S1_MAC_AN_COMPLETE) == 0) {
3979 DEBUGOUT("Auto-Negotiation did not complete\n");
3980 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
3981 goto out;
3982 }
3983
3984 /* Read the 10g AN autoc and LP ability registers and resolve
3985 * local flow control settings accordingly
3986 */
3987 status = hw->mac.ops.read_iosf_sb_reg(hw,
3988 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
3989 IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl_1);
3990
3991 if (status != IXGBE_SUCCESS) {
3992 DEBUGOUT("Auto-Negotiation did not complete\n");
3993 goto out;
3994 }
3995
3996 status = hw->mac.ops.read_iosf_sb_reg(hw,
3997 IXGBE_KRM_LP_BASE_PAGE_HIGH(hw->bus.lan_id),
3998 IXGBE_SB_IOSF_TARGET_KR_PHY, &lp_an_page_low);
3999
4000 if (status != IXGBE_SUCCESS) {
4001 DEBUGOUT("Auto-Negotiation did not complete\n");
4002 goto out;
4003 }
4004
4005 status = ixgbe_negotiate_fc(hw, an_cntl_1, lp_an_page_low,
4006 IXGBE_KRM_AN_CNTL_1_SYM_PAUSE,
4007 IXGBE_KRM_AN_CNTL_1_ASM_PAUSE,
4008 IXGBE_KRM_LP_BASE_PAGE_HIGH_SYM_PAUSE,
4009 IXGBE_KRM_LP_BASE_PAGE_HIGH_ASM_PAUSE);
4010
4011 out:
4012 if (status == IXGBE_SUCCESS) {
4013 hw->fc.fc_was_autonegged = true;
4014 } else {
4015 hw->fc.fc_was_autonegged = false;
4016 hw->fc.current_mode = hw->fc.requested_mode;
4017 }
4018 }
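
/*
 * Resolution sketch (illustrative, following the usual clause 37 pause
 * resolution): ixgbe_negotiate_fc() compares the local advertisement with
 * the link partner base page and sets hw->fc.current_mode roughly as:
 *
 *	local SYM/ASM	LP SYM/ASM	resolved mode
 *	  1    -	  1    -	ixgbe_fc_full (or rx_pause when only
 *					Rx pause was requested locally)
 *	  0    1	  1    1	ixgbe_fc_tx_pause
 *	  1    1	  0    1	ixgbe_fc_rx_pause
 *	  otherwise			ixgbe_fc_none
 */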
4019
4020 /**
4021 * ixgbe_fc_autoneg_fiber_x550em_a - passthrough FC settings
4022 * @hw: pointer to hardware structure
4023 *
4024 **/
4025 void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *hw)
4026 {
4027 hw->fc.fc_was_autonegged = false;
4028 hw->fc.current_mode = hw->fc.requested_mode;
4029 }
4030
4031 /**
4032 * ixgbe_fc_autoneg_sgmii_x550em_a - Enable flow control IEEE clause 37
4033 * @hw: pointer to hardware structure
4034 *
4035 * Enable flow control according to IEEE clause 37.
4036 **/
4037 void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw)
4038 {
4039 s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
4040 u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
4041 ixgbe_link_speed speed;
4042 bool link_up;
4043
4044 /* AN should have completed when the cable was plugged in.
4045 * Look for reasons to bail out. Bail out if:
4046 * - FC autoneg is disabled, or if
4047 * - link is not up.
4048 */
4049 if (hw->fc.disable_fc_autoneg) {
4050 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
4051 "Flow control autoneg is disabled");
4052 goto out;
4053 }
4054
4055 hw->mac.ops.check_link(hw, &speed, &link_up, false);
4056 if (!link_up) {
4057 ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
4058 goto out;
4059 }
4060
4061 /* Check if auto-negotiation has completed */
4062 status = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &info);
4063 if (status != IXGBE_SUCCESS ||
4064 !(info[0] & FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE)) {
4065 DEBUGOUT("Auto-Negotiation did not complete\n");
4066 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
4067 goto out;
4068 }
4069
4070 /* Negotiate the flow control */
4071 status = ixgbe_negotiate_fc(hw, info[0], info[0],
4072 FW_PHY_ACT_GET_LINK_INFO_FC_RX,
4073 FW_PHY_ACT_GET_LINK_INFO_FC_TX,
4074 FW_PHY_ACT_GET_LINK_INFO_LP_FC_RX,
4075 FW_PHY_ACT_GET_LINK_INFO_LP_FC_TX);
4076
4077 out:
4078 if (status == IXGBE_SUCCESS) {
4079 hw->fc.fc_was_autonegged = true;
4080 } else {
4081 hw->fc.fc_was_autonegged = false;
4082 hw->fc.current_mode = hw->fc.requested_mode;
4083 }
4084 }
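
/*
 * Note on the ixgbe_negotiate_fc() call above: firmware reports the local
 * and link-partner pause bits in the same FW_PHY_ACT_GET_LINK_INFO word,
 * which is why info[0] is passed as both the local and the LP argument,
 * with different bit masks selecting each side.
 */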
4085
4086 /**
4087 * ixgbe_setup_fc_backplane_x550em_a - Set up flow control
4088 * @hw: pointer to hardware structure
4089 *
4090 * Called at init time to set up flow control.
4091 **/
4092 s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw)
4093 {
4094 s32 status = IXGBE_SUCCESS;
4095 u32 an_cntl = 0;
4096
4097 DEBUGFUNC("ixgbe_setup_fc_backplane_x550em_a");
4098
4099 /* Validate the requested mode */
4100 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
4101 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
4102 "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
4103 return IXGBE_ERR_INVALID_LINK_SETTINGS;
4104 }
4105
4106 if (hw->fc.requested_mode == ixgbe_fc_default)
4107 hw->fc.requested_mode = ixgbe_fc_full;
4108
4109 /* Set up the 1G and 10G flow control advertisement registers so the
4110 * HW will be able to do FC autoneg once the cable is plugged in. If
4111 * we link at 10G, the 1G advertisement is harmless and vice versa.
4112 */
4113 status = hw->mac.ops.read_iosf_sb_reg(hw,
4114 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
4115 IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl);
4116
4117 if (status != IXGBE_SUCCESS) {
4118 DEBUGOUT("Auto-Negotiation did not complete\n");
4119 return status;
4120 }
4121
4122 /* The possible values of fc.requested_mode are:
4123 * 0: Flow control is completely disabled
4124 * 1: Rx flow control is enabled (we can receive pause frames,
4125 * but not send pause frames).
4126 * 2: Tx flow control is enabled (we can send pause frames but
4127 * we do not support receiving pause frames).
4128 * 3: Both Rx and Tx flow control (symmetric) are enabled.
4129 * other: Invalid.
4130 */
4131 switch (hw->fc.requested_mode) {
4132 case ixgbe_fc_none:
4133 /* Flow control completely disabled by software override. */
4134 an_cntl &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
4135 IXGBE_KRM_AN_CNTL_1_ASM_PAUSE);
4136 break;
4137 case ixgbe_fc_tx_pause:
4138 /* Tx Flow control is enabled, and Rx Flow control is
4139 * disabled by software override.
4140 */
4141 an_cntl |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
4142 an_cntl &= ~IXGBE_KRM_AN_CNTL_1_SYM_PAUSE;
4143 break;
4144 case ixgbe_fc_rx_pause:
4145 /* Rx Flow control is enabled and Tx Flow control is
4146 * disabled by software override. Since there really
4147 * isn't a way to advertise that we are capable of RX
4148 * Pause ONLY, we will advertise that we support both
4149 * symmetric and asymmetric Rx PAUSE, as such we fall
4150 * through to the fc_full statement. Later, we will
4151 * disable the adapter's ability to send PAUSE frames.
4152 */
4153 case ixgbe_fc_full:
4154 /* Flow control (both Rx and Tx) is enabled by SW override. */
4155 an_cntl |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
4156 IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
4157 break;
4158 default:
4159 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
4160 "Flow control param set incorrectly\n");
4161 return IXGBE_ERR_CONFIG;
4162 }
4163
4164 status = hw->mac.ops.write_iosf_sb_reg(hw,
4165 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
4166 IXGBE_SB_IOSF_TARGET_KR_PHY, an_cntl);
4167
4168 /* Restart auto-negotiation. */
4169 status = ixgbe_restart_an_internal_phy_x550em(hw);
4170
4171 return status;
4172 }
4173
4174 /**
4175 * ixgbe_set_mux - Set mux for port 1 access with CS4227
4176 * @hw: pointer to hardware structure
4177 * @state: set mux if 1, clear if 0
4178 */
4179 static void ixgbe_set_mux(struct ixgbe_hw *hw, u8 state)
4180 {
4181 u32 esdp;
4182
4183 if (!hw->bus.lan_id)
4184 return;
4185 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
4186 if (state)
4187 esdp |= IXGBE_ESDP_SDP1;
4188 else
4189 esdp &= ~IXGBE_ESDP_SDP1;
4190 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
4191 IXGBE_WRITE_FLUSH(hw);
4192 }
4193
4194 /**
4195 * ixgbe_acquire_swfw_sync_X550em - Acquire SWFW semaphore
4196 * @hw: pointer to hardware structure
4197 * @mask: Mask to specify which semaphore to acquire
4198 *
4199 * Acquires the SWFW semaphore and sets the I2C MUX
4200 **/
4201 s32 ixgbe_acquire_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
4202 {
4203 s32 status;
4204
4205 DEBUGFUNC("ixgbe_acquire_swfw_sync_X550em");
4206
4207 status = ixgbe_acquire_swfw_sync_X540(hw, mask);
4208 if (status)
4209 return status;
4210
4211 if (mask & IXGBE_GSSR_I2C_MASK)
4212 ixgbe_set_mux(hw, 1);
4213
4214 return IXGBE_SUCCESS;
4215 }
4216
4217 /**
4218 * ixgbe_release_swfw_sync_X550em - Release SWFW semaphore
4219 * @hw: pointer to hardware structure
4220 * @mask: Mask to specify which semaphore to release
4221 *
4222 * Releases the SWFW semaphore and clears the I2C MUX
4223 **/
4224 void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
4225 {
4226 DEBUGFUNC("ixgbe_release_swfw_sync_X550em");
4227
4228 if (mask & IXGBE_GSSR_I2C_MASK)
4229 ixgbe_set_mux(hw, 0);
4230
4231 ixgbe_release_swfw_sync_X540(hw, mask);
4232 }
4233
4234 /**
4235 * ixgbe_acquire_swfw_sync_X550a - Acquire SWFW semaphore
4236 * @hw: pointer to hardware structure
4237 * @mask: Mask to specify which semaphore to acquire
4238 *
4239 * Acquires the SWFW semaphore and gets the shared PHY token as needed
4240 */
4241 static s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask)
4242 {
4243 u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM;
4244 int retries = FW_PHY_TOKEN_RETRIES;
4245 s32 status = IXGBE_SUCCESS;
4246
4247 DEBUGFUNC("ixgbe_acquire_swfw_sync_X550a");
4248
4249 status = IXGBE_SUCCESS;
4250 if (hmask)
4251 status = ixgbe_acquire_swfw_sync_X540(hw, hmask);
4252
4253 if (status) {
4254 DEBUGOUT1("Could not acquire SWFW semaphore, Status = %d\n", status);
4255 return status;
4256 }
4257
4258 if (!(mask & IXGBE_GSSR_TOKEN_SM))
4259 return IXGBE_SUCCESS;
4260
4261 while (--retries) {
4262 status = ixgbe_get_phy_token(hw);
4263
4264 if (status == IXGBE_SUCCESS)
4265 return IXGBE_SUCCESS;
4266
4267 if (status != IXGBE_ERR_TOKEN_RETRY) {
4268 DEBUGOUT1("Retry acquiring the PHY token failed, Status = %d\n", status);
4269 if (hmask)
4270 ixgbe_release_swfw_sync_X540(hw, hmask);
4271 return status;
4272 }
4273
4274 if (status == IXGBE_ERR_TOKEN_RETRY)
4275 DEBUGOUT1("Could not acquire PHY token, Status = %d\n",
4276 status);
4277 }
4278
4279 if (hmask)
4280 ixgbe_release_swfw_sync_X540(hw, hmask);
4281
4282 DEBUGOUT1("Semaphore acquisition retries failed!: PHY ID = 0x%08X\n",
4283 hw->phy.id);
4284 return status;
4285 }
4286
4287 /**
4288 * ixgbe_release_swfw_sync_X550a - Release SWFW semaphore
4289 * @hw: pointer to hardware structure
4290 * @mask: Mask to specify which semaphore to release
4291 *
4292 * Releases the SWFW semaphore and returns the shared PHY token as needed
4293 */
4294 static void ixgbe_release_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask)
4295 {
4296 u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM;
4297
4298 DEBUGFUNC("ixgbe_release_swfw_sync_X550a");
4299
4300 if (mask & IXGBE_GSSR_TOKEN_SM)
4301 ixgbe_put_phy_token(hw);
4302
4303 if (hmask)
4304 ixgbe_release_swfw_sync_X540(hw, hmask);
4305 }
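
/*
 * Usage sketch (illustrative only): the acquire/release pair above is meant
 * to bracket any access to the MDIO bus shared between MACs, e.g.:
 *
 *	u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
 *
 *	if (hw->mac.ops.acquire_swfw_sync(hw, mask))
 *		return IXGBE_ERR_SWFW_SYNC;
 *	... MDIO access ...
 *	hw->mac.ops.release_swfw_sync(hw, mask);
 *
 * ixgbe_read_phy_reg_x550a() below follows exactly this pattern.
 */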
4306
4307 /**
4308 * ixgbe_read_phy_reg_x550a - Reads specified PHY register
4309 * @hw: pointer to hardware structure
4310 * @reg_addr: 32 bit address of PHY register to read
4311 * @device_type: 5 bit device type
4312 * @phy_data: Pointer to read data from PHY register
4313 *
4314 * Reads a value from a specified PHY register using the SWFW lock and PHY
4315 * Token. The PHY Token is needed since the MDIO is shared between two MAC
4316 * instances.
4317 **/
4318 s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
4319 u32 device_type, u16 *phy_data)
4320 {
4321 s32 status;
4322 u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
4323
4324 DEBUGFUNC("ixgbe_read_phy_reg_x550a");
4325
4326 if (hw->mac.ops.acquire_swfw_sync(hw, mask))
4327 return IXGBE_ERR_SWFW_SYNC;
4328
4329 status = hw->phy.ops.read_reg_mdi(hw, reg_addr, device_type, phy_data);
4330
4331 hw->mac.ops.release_swfw_sync(hw, mask);
4332
4333 return status;
4334 }
4335
4336 /**
4337 * ixgbe_write_phy_reg_x550a - Writes specified PHY register
4338 * @hw: pointer to hardware structure
4339 * @reg_addr: 32 bit PHY register to write
4340 * @device_type: 5 bit device type
4341 * @phy_data: Data to write to the PHY register
4342 *
4343 * Writes a value to specified PHY register using the SWFW lock and PHY Token.
4344 * The PHY Token is needed since the MDIO is shared between two MAC instances.
4345 **/
4346 s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
4347 u32 device_type, u16 phy_data)
4348 {
4349 s32 status;
4350 u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
4351
4352 DEBUGFUNC("ixgbe_write_phy_reg_x550a");
4353
4354 if (hw->mac.ops.acquire_swfw_sync(hw, mask) == IXGBE_SUCCESS) {
4355 status = hw->phy.ops.write_reg_mdi(hw, reg_addr, device_type,
4356 phy_data);
4357 hw->mac.ops.release_swfw_sync(hw, mask);
4358 } else {
4359 status = IXGBE_ERR_SWFW_SYNC;
4360 }
4361
4362 return status;
4363 }
4364
4365 /**
4366 * ixgbe_handle_lasi_ext_t_x550em - Handle external Base T PHY interrupt
4367 * @hw: pointer to hardware structure
4368 *
4369 * Handle external Base T PHY interrupt. If high temperature
4370 * failure alarm then return error, else if link status change
4371 * then setup internal/external PHY link
4372 *
4373 * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature
4374 * failure alarm, else return PHY access status.
4375 */
4376 s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw)
4377 {
4378 bool lsc;
4379 u32 status;
4380
4381 status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
4382
4383 if (status != IXGBE_SUCCESS)
4384 return status;
4385
4386 if (lsc)
4387 return ixgbe_setup_internal_phy(hw);
4388
4389 return IXGBE_SUCCESS;
4390 }
4391
4392 /**
4393 * ixgbe_setup_mac_link_t_X550em - Sets the auto advertised link speed
4394 * @hw: pointer to hardware structure
4395 * @speed: new link speed
4396 * @autoneg_wait_to_complete: true when waiting for completion is needed
4397 *
4398 * Setup internal/external PHY link speed based on link speed, then set
4399 * external PHY auto advertised link speed.
4400 *
4401 * Returns error status for any failure
4402 **/
4403 s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw,
4404 ixgbe_link_speed speed,
4405 bool autoneg_wait_to_complete)
4406 {
4407 s32 status;
4408 ixgbe_link_speed force_speed;
4409 u32 i;
4410 bool link_up = false;
4411
4412 DEBUGFUNC("ixgbe_setup_mac_link_t_X550em");
4413
4414 /* Set up internal/external PHY link speed to iXFI (10G), unless
4415 * only 1G is auto advertised, in which case set up a KX link.
4416 */
4417 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
4418 force_speed = IXGBE_LINK_SPEED_10GB_FULL;
4419 else
4420 force_speed = IXGBE_LINK_SPEED_1GB_FULL;
4421
4422 /* If X552 and internal link mode is XFI, then setup XFI internal link.
4423 */
4424 if (hw->mac.type == ixgbe_mac_X550EM_x &&
4425 !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
4426 status = ixgbe_setup_ixfi_x550em(hw, &force_speed);
4427
4428 if (status != IXGBE_SUCCESS)
4429 return status;
4430
4431 /* Wait for the controller to acquire link */
4432 for (i = 0; i < 10; i++) {
4433 msec_delay(100);
4434
4435 status = ixgbe_check_link(hw, &force_speed, &link_up,
4436 false);
4437 if (status != IXGBE_SUCCESS)
4438 return status;
4439
4440 if (link_up)
4441 break;
4442 }
4443 }
4444
4445 return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait_to_complete);
4446 }
4447
4448 /**
4449 * ixgbe_check_link_t_X550em - Determine link and speed status
4450 * @hw: pointer to hardware structure
4451 * @speed: pointer to link speed
4452 * @link_up: true when link is up
4453 * @link_up_wait_to_complete: bool used to wait for link up or not
4454 *
4455 * Check that both the MAC and X557 external PHY have link.
4456 **/
4457 s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
4458 bool *link_up, bool link_up_wait_to_complete)
4459 {
4460 u32 status;
4461 u16 i, autoneg_status = 0;
4462
4463 if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
4464 return IXGBE_ERR_CONFIG;
4465
4466 status = ixgbe_check_mac_link_generic(hw, speed, link_up,
4467 link_up_wait_to_complete);
4468
4469 /* If check link fails or MAC link is not up, then return */
4470 if (status != IXGBE_SUCCESS || !(*link_up))
4471 return status;
4472
4473 /* MAC link is up, so check external PHY link.
4474 * X557 PHY. Link status is latching low, and can only be used to detect
4475 * link drop, and not the current status of the link without performing
4476 * back-to-back reads.
4477 */
4478 for (i = 0; i < 2; i++) {
4479 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
4480 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
4481 &autoneg_status);
4482
4483 if (status != IXGBE_SUCCESS)
4484 return status;
4485 }
4486
4487 /* If external PHY link is not up, then indicate link not up */
4488 if (!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS))
4489 *link_up = false;
4490
4491 return IXGBE_SUCCESS;
4492 }
4493
4494 /**
4495 * ixgbe_reset_phy_t_X550em - Performs X557 PHY reset and enables LASI
4496 * @hw: pointer to hardware structure
4497 **/
4498 s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw)
4499 {
4500 s32 status;
4501
4502 status = ixgbe_reset_phy_generic(hw);
4503
4504 if (status != IXGBE_SUCCESS)
4505 return status;
4506
4507 /* Configure Link Status Alarm and Temperature Threshold interrupts */
4508 return ixgbe_enable_lasi_ext_t_x550em(hw);
4509 }
4510
4511 /**
4512 * ixgbe_led_on_t_X550em - Turns on the software controllable LEDs.
4513 * @hw: pointer to hardware structure
4514 * @led_idx: led number to turn on
4515 **/
4516 s32 ixgbe_led_on_t_X550em(struct ixgbe_hw *hw, u32 led_idx)
4517 {
4518 u16 phy_data;
4519
4520 DEBUGFUNC("ixgbe_led_on_t_X550em");
4521
4522 if (led_idx >= IXGBE_X557_MAX_LED_INDEX)
4523 return IXGBE_ERR_PARAM;
4524
4525 /* To turn on the LED, set mode to ON. */
4526 ixgbe_read_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
4527 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data);
4528 phy_data |= IXGBE_X557_LED_MANUAL_SET_MASK;
4529 ixgbe_write_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
4530 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data);
4531
4532 /* Some designs have the LEDs wired to the MAC */
4533 return ixgbe_led_on_generic(hw, led_idx);
4534 }
4535
4536 /**
4537 * ixgbe_led_off_t_X550em - Turns off the software controllable LEDs.
4538 * @hw: pointer to hardware structure
4539 * @led_idx: led number to turn off
4540 **/
4541 s32 ixgbe_led_off_t_X550em(struct ixgbe_hw *hw, u32 led_idx)
4542 {
4543 u16 phy_data;
4544
4545 DEBUGFUNC("ixgbe_led_off_t_X550em");
4546
4547 if (led_idx >= IXGBE_X557_MAX_LED_INDEX)
4548 return IXGBE_ERR_PARAM;
4549
4550 /* To turn off the LED, set mode to OFF. */
4551 ixgbe_read_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
4552 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data);
4553 phy_data &= ~IXGBE_X557_LED_MANUAL_SET_MASK;
4554 ixgbe_write_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
4555 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data);
4556
4557 /* Some designs have the LEDs wired to the MAC */
4558 return ixgbe_led_off_generic(hw, led_idx);
4559 }
4560
4561 /**
4562 * ixgbe_set_fw_drv_ver_x550 - Sends driver version to firmware
4563 * @hw: pointer to the HW structure
4564 * @maj: driver version major number
4565 * @min: driver version minor number
4566 * @build: driver version build number
4567 * @sub: driver version sub build number
4568 * @len: length of driver_ver string
4569 * @driver_ver: driver string
4570 *
4571 * Sends driver version number to firmware through the manageability
4572 * block. Returns IXGBE_SUCCESS on success,
4573 * IXGBE_ERR_SWFW_SYNC when an error is encountered acquiring the
4574 * semaphore, or IXGBE_ERR_HOST_INTERFACE_COMMAND when the command fails.
4575 **/
4576 s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
4577 u8 build, u8 sub, u16 len, const char *driver_ver)
4578 {
4579 struct ixgbe_hic_drv_info2 fw_cmd;
4580 s32 ret_val = IXGBE_SUCCESS;
4581 int i;
4582
4583 DEBUGFUNC("ixgbe_set_fw_drv_ver_x550");
4584
4585 if ((len == 0) || (driver_ver == NULL) ||
4586 (len > sizeof(fw_cmd.driver_string)))
4587 return IXGBE_ERR_INVALID_ARGUMENT;
4588
4589 fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
4590 fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN + len;
4591 fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
4592 fw_cmd.port_num = (u8)hw->bus.func;
4593 fw_cmd.ver_maj = maj;
4594 fw_cmd.ver_min = min;
4595 fw_cmd.ver_build = build;
4596 fw_cmd.ver_sub = sub;
4597 fw_cmd.hdr.checksum = 0;
4598 memcpy(fw_cmd.driver_string, driver_ver, len);
4599 fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
4600 (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
4601
4602 for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
4603 ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
4604 sizeof(fw_cmd),
4605 IXGBE_HI_COMMAND_TIMEOUT,
4606 true);
4607 if (ret_val != IXGBE_SUCCESS)
4608 continue;
4609
4610 if (fw_cmd.hdr.cmd_or_resp.ret_status ==
4611 FW_CEM_RESP_STATUS_SUCCESS)
4612 ret_val = IXGBE_SUCCESS;
4613 else
4614 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4615
4616 break;
4617 }
4618
4619 return ret_val;
4620 }
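
/*
 * Usage sketch (illustrative only; version numbers and string are
 * hypothetical):
 *
 *	static const char drv_ver[] = "example-driver";
 *
 *	ixgbe_set_fw_drv_ver_x550(hw, 1, 0, 0, 0,
 *				  sizeof(drv_ver), drv_ver);
 *
 * len must not exceed sizeof(fw_cmd.driver_string), otherwise the call
 * returns IXGBE_ERR_INVALID_ARGUMENT.
 */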
4621
4622 /**
4623 * ixgbe_fw_recovery_mode_X550 - Check FW NVM recovery mode
4624 * @hw: pointer to hardware structure
4625 *
4626 * Returns true if in FW NVM recovery mode.
4627 **/
4628 bool ixgbe_fw_recovery_mode_X550(struct ixgbe_hw *hw)
4629 {
4630 u32 fwsm;
4631
4632 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw));
4633
4634 return !!(fwsm & IXGBE_FWSM_FW_NVM_RECOVERY_MODE);
4635 }
4636