1 /******************************************************************************
2
3 Copyright (c) 2001-2020, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33
34 #include "ixgbe_x550.h"
35 #include "ixgbe_x540.h"
36 #include "ixgbe_type.h"
37 #include "ixgbe_api.h"
38 #include "ixgbe_common.h"
39 #include "ixgbe_phy.h"
40
41 static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed);
42 static s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *, u32 mask);
43 static void ixgbe_release_swfw_sync_X550a(struct ixgbe_hw *, u32 mask);
44 static s32 ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw);
45
46 /**
47 * ixgbe_init_ops_X550 - Inits func ptrs and MAC type
48 * @hw: pointer to hardware structure
49 *
50 * Initialize the function pointers and assign the MAC type for X550.
51 * Does not touch the hardware.
52 **/
53 s32 ixgbe_init_ops_X550(struct ixgbe_hw *hw)
54 {
55 struct ixgbe_mac_info *mac = &hw->mac;
56 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
57 s32 ret_val;
58
59 DEBUGFUNC("ixgbe_init_ops_X550");
60
61 ret_val = ixgbe_init_ops_X540(hw);
62 mac->ops.dmac_config = ixgbe_dmac_config_X550;
63 mac->ops.dmac_config_tcs = ixgbe_dmac_config_tcs_X550;
64 mac->ops.dmac_update_tcs = ixgbe_dmac_update_tcs_X550;
65 mac->ops.setup_eee = NULL;
66 mac->ops.set_source_address_pruning =
67 ixgbe_set_source_address_pruning_X550;
68 mac->ops.set_ethertype_anti_spoofing =
69 ixgbe_set_ethertype_anti_spoofing_X550;
70
71 mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic;
72 eeprom->ops.init_params = ixgbe_init_eeprom_params_X550;
73 eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X550;
74 eeprom->ops.read = ixgbe_read_ee_hostif_X550;
75 eeprom->ops.read_buffer = ixgbe_read_ee_hostif_buffer_X550;
76 eeprom->ops.write = ixgbe_write_ee_hostif_X550;
77 eeprom->ops.write_buffer = ixgbe_write_ee_hostif_buffer_X550;
78 eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X550;
79 eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X550;
80
81 mac->ops.disable_mdd = ixgbe_disable_mdd_X550;
82 mac->ops.enable_mdd = ixgbe_enable_mdd_X550;
83 mac->ops.mdd_event = ixgbe_mdd_event_X550;
84 mac->ops.restore_mdd_vf = ixgbe_restore_mdd_vf_X550;
85 mac->ops.fw_recovery_mode = ixgbe_fw_recovery_mode_X550;
86 mac->ops.disable_rx = ixgbe_disable_rx_x550;
87 /* Manageability interface */
88 mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_x550;
89 switch (hw->device_id) {
90 case IXGBE_DEV_ID_X550EM_X_1G_T:
91 hw->mac.ops.led_on = NULL;
92 hw->mac.ops.led_off = NULL;
93 break;
94 case IXGBE_DEV_ID_X550EM_X_10G_T:
95 case IXGBE_DEV_ID_X550EM_A_10G_T:
96 hw->mac.ops.led_on = ixgbe_led_on_t_X550em;
97 hw->mac.ops.led_off = ixgbe_led_off_t_X550em;
98 break;
99 default:
100 break;
101 }
102 return ret_val;
103 }
104
105 /**
106 * ixgbe_read_cs4227 - Read CS4227 register
107 * @hw: pointer to hardware structure
108 * @reg: register number to read
109 * @value: pointer to receive value read
110 *
111 * Returns status code
112 **/
113 static s32 ixgbe_read_cs4227(struct ixgbe_hw *hw, u16 reg, u16 *value)
114 {
115 return hw->link.ops.read_link_unlocked(hw, hw->link.addr, reg, value);
116 }
117
118 /**
119 * ixgbe_write_cs4227 - Write CS4227 register
120 * @hw: pointer to hardware structure
121 * @reg: register number to write
122 * @value: value to write to register
123 *
124 * Returns status code
125 **/
126 static s32 ixgbe_write_cs4227(struct ixgbe_hw *hw, u16 reg, u16 value)
127 {
128 return hw->link.ops.write_link_unlocked(hw, hw->link.addr, reg, value);
129 }
130
131 /**
132 * ixgbe_read_pe - Read register from port expander
133 * @hw: pointer to hardware structure
134 * @reg: register number to read
135 * @value: pointer to receive read value
136 *
137 * Returns status code
138 **/
139 static s32 ixgbe_read_pe(struct ixgbe_hw *hw, u8 reg, u8 *value)
140 {
141 s32 status;
142
143 status = ixgbe_read_i2c_byte_unlocked(hw, reg, IXGBE_PE, value);
144 if (status != IXGBE_SUCCESS)
145 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
146 "port expander access failed with %d\n", status);
147 return status;
148 }
149
150 /**
151 * ixgbe_write_pe - Write register to port expander
152 * @hw: pointer to hardware structure
153 * @reg: register number to write
154 * @value: value to write
155 *
156 * Returns status code
157 **/
158 static s32 ixgbe_write_pe(struct ixgbe_hw *hw, u8 reg, u8 value)
159 {
160 s32 status;
161
162 status = ixgbe_write_i2c_byte_unlocked(hw, reg, IXGBE_PE, value);
163 if (status != IXGBE_SUCCESS)
164 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
165 "port expander access failed with %d\n", status);
166 return status;
167 }
168
169 /**
170 * ixgbe_reset_cs4227 - Reset CS4227 using port expander
171 * @hw: pointer to hardware structure
172 *
173 * This function assumes that the caller has acquired the proper semaphore.
174 * Returns error code
175 **/
176 static s32 ixgbe_reset_cs4227(struct ixgbe_hw *hw)
177 {
178 s32 status;
179 u32 retry;
180 u16 value;
181 u8 reg;
182
183 /* Trigger hard reset. */
184 status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
185 if (status != IXGBE_SUCCESS)
186 return status;
187 reg |= IXGBE_PE_BIT1;
188 status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
189 if (status != IXGBE_SUCCESS)
190 return status;
191
192 status = ixgbe_read_pe(hw, IXGBE_PE_CONFIG, &reg);
193 if (status != IXGBE_SUCCESS)
194 return status;
195 reg &= ~IXGBE_PE_BIT1;
196 status = ixgbe_write_pe(hw, IXGBE_PE_CONFIG, reg);
197 if (status != IXGBE_SUCCESS)
198 return status;
199
200 status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
201 if (status != IXGBE_SUCCESS)
202 return status;
203 reg &= ~IXGBE_PE_BIT1;
204 status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
205 if (status != IXGBE_SUCCESS)
206 return status;
207
208 usec_delay(IXGBE_CS4227_RESET_HOLD);
209
210 status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
211 if (status != IXGBE_SUCCESS)
212 return status;
213 reg |= IXGBE_PE_BIT1;
214 status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
215 if (status != IXGBE_SUCCESS)
216 return status;
217
218 /* Wait for the reset to complete. */
219 msec_delay(IXGBE_CS4227_RESET_DELAY);
220 for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) {
221 status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EFUSE_STATUS,
222 &value);
223 if (status == IXGBE_SUCCESS &&
224 value == IXGBE_CS4227_EEPROM_LOAD_OK)
225 break;
226 msec_delay(IXGBE_CS4227_CHECK_DELAY);
227 }
228 if (retry == IXGBE_CS4227_RETRIES) {
229 ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
230 "CS4227 reset did not complete.");
231 return IXGBE_ERR_PHY;
232 }
233
234 status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EEPROM_STATUS, &value);
235 if (status != IXGBE_SUCCESS ||
236 !(value & IXGBE_CS4227_EEPROM_LOAD_OK)) {
237 ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
238 "CS4227 EEPROM did not load successfully.");
239 return IXGBE_ERR_PHY;
240 }
241
242 return IXGBE_SUCCESS;
243 }
244
245 /**
246 * ixgbe_check_cs4227 - Check CS4227 and reset as needed
247 * @hw: pointer to hardware structure
248 **/
249 static void ixgbe_check_cs4227(struct ixgbe_hw *hw)
250 {
251 s32 status = IXGBE_SUCCESS;
252 u32 swfw_mask = hw->phy.phy_semaphore_mask;
253 u16 value = 0;
254 u8 retry;
255
256 for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) {
257 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
258 if (status != IXGBE_SUCCESS) {
259 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
260 "semaphore failed with %d", status);
261 msec_delay(IXGBE_CS4227_CHECK_DELAY);
262 continue;
263 }
264
265 /* Get status of reset flow. */
266 status = ixgbe_read_cs4227(hw, IXGBE_CS4227_SCRATCH, &value);
267
268 if (status == IXGBE_SUCCESS &&
269 value == IXGBE_CS4227_RESET_COMPLETE)
270 goto out;
271
272 if (status != IXGBE_SUCCESS ||
273 value != IXGBE_CS4227_RESET_PENDING)
274 break;
275
276 /* Reset is pending. Wait and check again. */
277 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
278 msec_delay(IXGBE_CS4227_CHECK_DELAY);
279 }
280
281 /* If still pending, assume other instance failed. */
282 if (retry == IXGBE_CS4227_RETRIES) {
283 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
284 if (status != IXGBE_SUCCESS) {
285 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
286 "semaphore failed with %d", status);
287 return;
288 }
289 }
290
291 /* Reset the CS4227. */
292 status = ixgbe_reset_cs4227(hw);
293 if (status != IXGBE_SUCCESS) {
294 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
295 "CS4227 reset failed: %d", status);
296 goto out;
297 }
298
299 /* Reset takes so long, temporarily release semaphore in case the
300 * other driver instance is waiting for the reset indication.
301 */
302 ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH,
303 IXGBE_CS4227_RESET_PENDING);
304 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
305 msec_delay(10);
306 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
307 if (status != IXGBE_SUCCESS) {
308 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
309 "semaphore failed with %d", status);
310 return;
311 }
312
313 /* Record completion for next time. */
314 status = ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH,
315 IXGBE_CS4227_RESET_COMPLETE);
316
317 out:
318 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
319 msec_delay(hw->eeprom.semaphore_delay);
320 }
321
322 /**
323 * ixgbe_setup_mux_ctl - Setup ESDP register for I2C mux control
324 * @hw: pointer to hardware structure
325 **/
326 static void ixgbe_setup_mux_ctl(struct ixgbe_hw *hw)
327 {
328 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
329
330 if (hw->bus.lan_id) {
331 esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1);
332 esdp |= IXGBE_ESDP_SDP1_DIR;
333 }
334 esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR);
335 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
336 IXGBE_WRITE_FLUSH(hw);
337 }
338
339 /**
340 * ixgbe_identify_phy_x550em - Get PHY type based on device id
341 * @hw: pointer to hardware structure
342 *
343 * Returns error code
344 */
345 static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
346 {
347 hw->mac.ops.set_lan_id(hw);
348
349 ixgbe_read_mng_if_sel_x550em(hw);
350
351 switch (hw->device_id) {
352 case IXGBE_DEV_ID_X550EM_A_SFP:
353 return ixgbe_identify_sfp_module_X550em(hw);
354 case IXGBE_DEV_ID_X550EM_X_SFP:
355 /* set up for CS4227 usage */
356 ixgbe_setup_mux_ctl(hw);
357 ixgbe_check_cs4227(hw);
358 return ixgbe_identify_sfp_module_X550em(hw);
359 case IXGBE_DEV_ID_X550EM_A_SFP_N:
360 return ixgbe_identify_sfp_module_X550em(hw);
361 break;
362 case IXGBE_DEV_ID_X550EM_X_KX4:
363 hw->phy.type = ixgbe_phy_x550em_kx4;
364 break;
365 case IXGBE_DEV_ID_X550EM_X_XFI:
366 hw->phy.type = ixgbe_phy_x550em_xfi;
367 break;
368 case IXGBE_DEV_ID_X550EM_X_KR:
369 case IXGBE_DEV_ID_X550EM_A_KR:
370 case IXGBE_DEV_ID_X550EM_A_KR_L:
371 hw->phy.type = ixgbe_phy_x550em_kr;
372 break;
373 case IXGBE_DEV_ID_X550EM_A_10G_T:
374 case IXGBE_DEV_ID_X550EM_X_10G_T:
375 return ixgbe_identify_phy_generic(hw);
376 case IXGBE_DEV_ID_X550EM_X_1G_T:
377 hw->phy.type = ixgbe_phy_ext_1g_t;
378 break;
379 case IXGBE_DEV_ID_X550EM_A_1G_T:
380 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
381 hw->phy.type = ixgbe_phy_fw;
382 if (hw->bus.lan_id)
383 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
384 else
385 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM;
386 break;
387 default:
388 break;
389 }
390 return IXGBE_SUCCESS;
391 }
392
393 /**
394 * ixgbe_fw_phy_activity - Perform an activity on a PHY
395 * @hw: pointer to hardware structure
396 * @activity: activity to perform
397 * @data: Pointer to 4 32-bit words of data
398 */
399 s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity,
400 u32 (*data)[FW_PHY_ACT_DATA_COUNT])
401 {
402 union {
403 struct ixgbe_hic_phy_activity_req cmd;
404 struct ixgbe_hic_phy_activity_resp rsp;
405 } hic;
406 u16 retries = FW_PHY_ACT_RETRIES;
407 s32 rc;
408 u16 i;
409
410 do {
411 memset(&hic, 0, sizeof(hic));
412 hic.cmd.hdr.cmd = FW_PHY_ACT_REQ_CMD;
413 hic.cmd.hdr.buf_len = FW_PHY_ACT_REQ_LEN;
414 hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
415 hic.cmd.port_number = hw->bus.lan_id;
416 hic.cmd.activity_id = IXGBE_CPU_TO_LE16(activity);
417 for (i = 0; i < FW_PHY_ACT_DATA_COUNT; ++i)
418 hic.cmd.data[i] = IXGBE_CPU_TO_BE32((*data)[i]);
419
420 rc = ixgbe_host_interface_command(hw, (u32 *)&hic.cmd,
421 sizeof(hic.cmd),
422 IXGBE_HI_COMMAND_TIMEOUT,
423 true);
424 if (rc != IXGBE_SUCCESS)
425 return rc;
426 if (hic.rsp.hdr.cmd_or_resp.ret_status ==
427 FW_CEM_RESP_STATUS_SUCCESS) {
428 for (i = 0; i < FW_PHY_ACT_DATA_COUNT; ++i)
429 (*data)[i] = IXGBE_BE32_TO_CPU(hic.rsp.data[i]);
430 return IXGBE_SUCCESS;
431 }
432 usec_delay(20);
433 --retries;
434 } while (retries > 0);
435
436 return IXGBE_ERR_HOST_INTERFACE_COMMAND;
437 }
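/* Illustrative usage sketch only (mirrors ixgbe_get_phy_id_fw() below): the
 * caller owns an array of FW_PHY_ACT_DATA_COUNT words that carries the
 * request payload in and receives the response payload back out:
 *
 *	u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
 *
 *	if (ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_PHY_INFO, &info) ==
 *	    IXGBE_SUCCESS)
 *		decode info[0]/info[1] using the FW_PHY_INFO_* masks;
 */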
438
439 static const struct {
440 u16 fw_speed;
441 ixgbe_link_speed phy_speed;
442 } ixgbe_fw_map[] = {
443 { FW_PHY_ACT_LINK_SPEED_10, IXGBE_LINK_SPEED_10_FULL },
444 { FW_PHY_ACT_LINK_SPEED_100, IXGBE_LINK_SPEED_100_FULL },
445 { FW_PHY_ACT_LINK_SPEED_1G, IXGBE_LINK_SPEED_1GB_FULL },
446 { FW_PHY_ACT_LINK_SPEED_2_5G, IXGBE_LINK_SPEED_2_5GB_FULL },
447 { FW_PHY_ACT_LINK_SPEED_5G, IXGBE_LINK_SPEED_5GB_FULL },
448 { FW_PHY_ACT_LINK_SPEED_10G, IXGBE_LINK_SPEED_10GB_FULL },
449 };
450
451 /**
452 * ixgbe_get_phy_id_fw - Get the phy ID via firmware command
453 * @hw: pointer to hardware structure
454 *
455 * Returns error code
456 */
457 static s32 ixgbe_get_phy_id_fw(struct ixgbe_hw *hw)
458 {
459 u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
460 u16 phy_speeds;
461 u16 phy_id_lo;
462 s32 rc;
463 u16 i;
464
465 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_PHY_INFO, &info);
466 if (rc)
467 return rc;
468
469 hw->phy.speeds_supported = 0;
470 phy_speeds = info[0] & FW_PHY_INFO_SPEED_MASK;
471 for (i = 0; i < sizeof(ixgbe_fw_map) / sizeof(ixgbe_fw_map[0]); ++i) {
472 if (phy_speeds & ixgbe_fw_map[i].fw_speed)
473 hw->phy.speeds_supported |= ixgbe_fw_map[i].phy_speed;
474 }
475 if (!hw->phy.autoneg_advertised)
476 hw->phy.autoneg_advertised = hw->phy.speeds_supported;
477
478 hw->phy.id = info[0] & FW_PHY_INFO_ID_HI_MASK;
479 phy_id_lo = info[1] & FW_PHY_INFO_ID_LO_MASK;
480 hw->phy.id |= phy_id_lo & IXGBE_PHY_REVISION_MASK;
481 hw->phy.revision = phy_id_lo & ~IXGBE_PHY_REVISION_MASK;
482 if (!hw->phy.id || hw->phy.id == IXGBE_PHY_REVISION_MASK)
483 return IXGBE_ERR_PHY_ADDR_INVALID;
484 return IXGBE_SUCCESS;
485 }
486
487 /**
488 * ixgbe_identify_phy_fw - Get PHY type based on firmware command
489 * @hw: pointer to hardware structure
490 *
491 * Returns error code
492 */
493 static s32 ixgbe_identify_phy_fw(struct ixgbe_hw *hw)
494 {
495 if (hw->bus.lan_id)
496 hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
497 else
498 hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
499
500 hw->phy.type = ixgbe_phy_fw;
501 hw->phy.ops.read_reg = NULL;
502 hw->phy.ops.write_reg = NULL;
503 return ixgbe_get_phy_id_fw(hw);
504 }
505
506 /**
507 * ixgbe_shutdown_fw_phy - Shutdown a firmware-controlled PHY
508 * @hw: pointer to hardware structure
509 *
510 * Returns error code
511 */
512 s32 ixgbe_shutdown_fw_phy(struct ixgbe_hw *hw)
513 {
514 u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 };
515
516 setup[0] = FW_PHY_ACT_FORCE_LINK_DOWN_OFF;
517 return ixgbe_fw_phy_activity(hw, FW_PHY_ACT_FORCE_LINK_DOWN, &setup);
518 }
519
520 static s32 ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
521 u32 device_type, u16 *phy_data)
522 {
523 UNREFERENCED_4PARAMETER(*hw, reg_addr, device_type, *phy_data);
524 return IXGBE_NOT_IMPLEMENTED;
525 }
526
527 static s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
528 u32 device_type, u16 phy_data)
529 {
530 UNREFERENCED_4PARAMETER(*hw, reg_addr, device_type, phy_data);
531 return IXGBE_NOT_IMPLEMENTED;
532 }
533
534 /**
535 * ixgbe_read_i2c_combined_generic - Perform I2C read combined operation
536 * @hw: pointer to the hardware structure
537 * @addr: I2C bus address to read from
538 * @reg: I2C device register to read from
539 * @val: pointer to location to receive read value
540 *
541 * Returns an error code on error.
542 **/
543 static s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
544 u16 reg, u16 *val)
545 {
546 return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, true);
547 }
548
549 /**
550 * ixgbe_read_i2c_combined_generic_unlocked - Do I2C read combined operation
551 * @hw: pointer to the hardware structure
552 * @addr: I2C bus address to read from
553 * @reg: I2C device register to read from
554 * @val: pointer to location to receive read value
555 *
556 * Returns an error code on error.
557 **/
558 static s32
559 ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr,
560 u16 reg, u16 *val)
561 {
562 return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, false);
563 }
564
565 /**
566 * ixgbe_write_i2c_combined_generic - Perform I2C write combined operation
567 * @hw: pointer to the hardware structure
568 * @addr: I2C bus address to write to
569 * @reg: I2C device register to write to
570 * @val: value to write
571 *
572 * Returns an error code on error.
573 **/
574 static s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw,
575 u8 addr, u16 reg, u16 val)
576 {
577 return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, true);
578 }
579
580 /**
581 * ixgbe_write_i2c_combined_generic_unlocked - Do I2C write combined operation
582 * @hw: pointer to the hardware structure
583 * @addr: I2C bus address to write to
584 * @reg: I2C device register to write to
585 * @val: value to write
586 *
587 * Returns an error code on error.
588 **/
589 static s32
590 ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw,
591 u8 addr, u16 reg, u16 val)
592 {
593 return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, false);
594 }
595
596 /**
597 * ixgbe_init_ops_X550EM - Inits func ptrs and MAC type
598 * @hw: pointer to hardware structure
599 *
600 * Initialize the function pointers and assign the MAC type for X550EM.
601 * Does not touch the hardware.
602 **/
603 s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw)
604 {
605 struct ixgbe_mac_info *mac = &hw->mac;
606 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
607 struct ixgbe_phy_info *phy = &hw->phy;
608 s32 ret_val;
609
610 DEBUGFUNC("ixgbe_init_ops_X550EM");
611
612 /* Similar to X550 so start there. */
613 ret_val = ixgbe_init_ops_X550(hw);
614
615 /* Since this function eventually calls
616 * ixgbe_init_ops_X540 by design, we are setting
617 * the pointers to NULL explicitly here to overwrite
618 * the values being set in the x540 function.
619 */
620 /* Thermal sensor not supported in x550EM */
621 mac->ops.get_thermal_sensor_data = NULL;
622 mac->ops.init_thermal_sensor_thresh = NULL;
623 mac->thermal_sensor_enabled = false;
624
625 /* Bypass not supported in x550EM */
626 mac->ops.bypass_rw = NULL;
627 mac->ops.bypass_valid_rd = NULL;
628 mac->ops.bypass_set = NULL;
629 mac->ops.bypass_rd_eep = NULL;
630
631 /* FCOE not supported in x550EM */
632 mac->ops.get_san_mac_addr = NULL;
633 mac->ops.set_san_mac_addr = NULL;
634 mac->ops.get_wwn_prefix = NULL;
635 mac->ops.get_fcoe_boot_status = NULL;
636
637 /* IPsec not supported in x550EM */
638 mac->ops.disable_sec_rx_path = NULL;
639 mac->ops.enable_sec_rx_path = NULL;
640
641 /* AUTOC register is not present in x550EM. */
642 mac->ops.prot_autoc_read = NULL;
643 mac->ops.prot_autoc_write = NULL;
644
645 /* X550EM bus type is internal*/
646 hw->bus.type = ixgbe_bus_type_internal;
647 mac->ops.get_bus_info = ixgbe_get_bus_info_X550em;
648
649
650 mac->ops.get_media_type = ixgbe_get_media_type_X550em;
651 mac->ops.setup_sfp = ixgbe_setup_sfp_modules_X550em;
652 mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_X550em;
653 mac->ops.reset_hw = ixgbe_reset_hw_X550em;
654 mac->ops.get_supported_physical_layer =
655 ixgbe_get_supported_physical_layer_X550em;
656
657 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper)
658 mac->ops.setup_fc = ixgbe_setup_fc_generic;
659 else
660 mac->ops.setup_fc = ixgbe_setup_fc_X550em;
661
662 /* PHY */
663 phy->ops.init = ixgbe_init_phy_ops_X550em;
664 switch (hw->device_id) {
665 case IXGBE_DEV_ID_X550EM_A_1G_T:
666 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
667 mac->ops.setup_fc = NULL;
668 phy->ops.identify = ixgbe_identify_phy_fw;
669 phy->ops.set_phy_power = NULL;
670 phy->ops.get_firmware_version = NULL;
671 break;
672 case IXGBE_DEV_ID_X550EM_X_1G_T:
673 mac->ops.setup_fc = NULL;
674 phy->ops.identify = ixgbe_identify_phy_x550em;
675 phy->ops.set_phy_power = NULL;
676 break;
677 default:
678 phy->ops.identify = ixgbe_identify_phy_x550em;
679 }
680
681 if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper)
682 phy->ops.set_phy_power = NULL;
683
684
685 /* EEPROM */
686 eeprom->ops.init_params = ixgbe_init_eeprom_params_X540;
687 eeprom->ops.read = ixgbe_read_ee_hostif_X550;
688 eeprom->ops.read_buffer = ixgbe_read_ee_hostif_buffer_X550;
689 eeprom->ops.write = ixgbe_write_ee_hostif_X550;
690 eeprom->ops.write_buffer = ixgbe_write_ee_hostif_buffer_X550;
691 eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X550;
692 eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X550;
693 eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X550;
694
695 return ret_val;
696 }
697
698 /**
699 * ixgbe_setup_fw_link - Setup firmware-controlled PHYs
700 * @hw: pointer to hardware structure
701 */
702 static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw)
703 {
704 u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 };
705 s32 rc;
706 u16 i;
707
708 if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
709 return 0;
710
711 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
712 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
713 "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
714 return IXGBE_ERR_INVALID_LINK_SETTINGS;
715 }
716
717 switch (hw->fc.requested_mode) {
718 case ixgbe_fc_full:
719 setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX <<
720 FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
721 break;
722 case ixgbe_fc_rx_pause:
723 setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RX <<
724 FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
725 break;
726 case ixgbe_fc_tx_pause:
727 setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_TX <<
728 FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
729 break;
730 default:
731 break;
732 }
733
734 for (i = 0; i < sizeof(ixgbe_fw_map) / sizeof(ixgbe_fw_map[0]); ++i) {
735 if (hw->phy.autoneg_advertised & ixgbe_fw_map[i].phy_speed)
736 setup[0] |= (u32)(ixgbe_fw_map[i].fw_speed);
737 }
738 setup[0] |= FW_PHY_ACT_SETUP_LINK_HP | FW_PHY_ACT_SETUP_LINK_AN;
739
740 if (hw->phy.eee_speeds_advertised)
741 setup[0] |= FW_PHY_ACT_SETUP_LINK_EEE;
742
743 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_SETUP_LINK, &setup);
744 if (rc)
745 return rc;
746 if (setup[0] == FW_PHY_ACT_SETUP_LINK_RSP_DOWN)
747 return IXGBE_ERR_OVERTEMP;
748 return IXGBE_SUCCESS;
749 }
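/* Illustrative only: with hw->fc.requested_mode == ixgbe_fc_full and just
 * 1G advertised, the request assembled above is
 *
 *	setup[0] = (FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX <<
 *		    FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT) |
 *		   FW_PHY_ACT_LINK_SPEED_1G |
 *		   FW_PHY_ACT_SETUP_LINK_HP | FW_PHY_ACT_SETUP_LINK_AN;
 *
 * sent to firmware through FW_PHY_ACT_SETUP_LINK.
 */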
750
751 /**
752 * ixgbe_fc_autoneg_fw - Set up flow control for FW-controlled PHYs
753 * @hw: pointer to hardware structure
754 *
755 * Called at init time to set up flow control.
756 */
757 static s32 ixgbe_fc_autoneg_fw(struct ixgbe_hw *hw)
758 {
759 if (hw->fc.requested_mode == ixgbe_fc_default)
760 hw->fc.requested_mode = ixgbe_fc_full;
761
762 return ixgbe_setup_fw_link(hw);
763 }
764
765 /**
766 * ixgbe_setup_eee_fw - Enable/disable EEE support
767 * @hw: pointer to the HW structure
768 * @enable_eee: boolean flag to enable EEE
769 *
770 * Enable/disable EEE based on enable_eee flag.
771 * This function controls EEE for firmware-based PHY implementations.
772 */
773 static s32 ixgbe_setup_eee_fw(struct ixgbe_hw *hw, bool enable_eee)
774 {
775 if (!!hw->phy.eee_speeds_advertised == enable_eee)
776 return IXGBE_SUCCESS;
777 if (enable_eee)
778 hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported;
779 else
780 hw->phy.eee_speeds_advertised = 0;
781 return hw->phy.ops.setup_link(hw);
782 }
783
784 /**
785 * ixgbe_init_ops_X550EM_a - Inits func ptrs and MAC type
786 * @hw: pointer to hardware structure
787 *
788 * Initialize the function pointers and assign the MAC type for X550EM_a.
789 * Does not touch the hardware.
790 **/
791 s32 ixgbe_init_ops_X550EM_a(struct ixgbe_hw *hw)
792 {
793 struct ixgbe_mac_info *mac = &hw->mac;
794 s32 ret_val;
795
796 DEBUGFUNC("ixgbe_init_ops_X550EM_a");
797
798 /* Start with generic X550EM init */
799 ret_val = ixgbe_init_ops_X550EM(hw);
800
801 mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550;
802 mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550;
803 mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X550a;
804 mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X550a;
805
806 switch (mac->ops.get_media_type(hw)) {
807 case ixgbe_media_type_fiber:
808 mac->ops.setup_fc = NULL;
809 mac->ops.fc_autoneg = ixgbe_fc_autoneg_fiber_x550em_a;
810 break;
811 case ixgbe_media_type_backplane:
812 mac->ops.fc_autoneg = ixgbe_fc_autoneg_backplane_x550em_a;
813 mac->ops.setup_fc = ixgbe_setup_fc_backplane_x550em_a;
814 break;
815 default:
816 break;
817 }
818
819 switch (hw->device_id) {
820 case IXGBE_DEV_ID_X550EM_A_1G_T:
821 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
822 mac->ops.fc_autoneg = ixgbe_fc_autoneg_sgmii_x550em_a;
823 mac->ops.setup_fc = ixgbe_fc_autoneg_fw;
824 mac->ops.setup_eee = ixgbe_setup_eee_fw;
825 hw->phy.eee_speeds_supported = IXGBE_LINK_SPEED_100_FULL |
826 IXGBE_LINK_SPEED_1GB_FULL;
827 hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported;
828 break;
829 default:
830 break;
831 }
832
833 return ret_val;
834 }
835
836 /**
837 * ixgbe_init_ops_X550EM_x - Inits func ptrs and MAC type
838 * @hw: pointer to hardware structure
839 *
840 * Initialize the function pointers and assign the MAC type for X550EM_x.
841 * Does not touch the hardware.
842 **/
843 s32 ixgbe_init_ops_X550EM_x(struct ixgbe_hw *hw)
844 {
845 struct ixgbe_mac_info *mac = &hw->mac;
846 struct ixgbe_link_info *link = &hw->link;
847 s32 ret_val;
848
849 DEBUGFUNC("ixgbe_init_ops_X550EM_x");
850
851 /* Start with generic X550EM init */
852 ret_val = ixgbe_init_ops_X550EM(hw);
853
854 mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550;
855 mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550;
856 mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X550em;
857 mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X550em;
858 link->ops.read_link = ixgbe_read_i2c_combined_generic;
859 link->ops.read_link_unlocked = ixgbe_read_i2c_combined_generic_unlocked;
860 link->ops.write_link = ixgbe_write_i2c_combined_generic;
861 link->ops.write_link_unlocked =
862 ixgbe_write_i2c_combined_generic_unlocked;
863 link->addr = IXGBE_CS4227;
864
865 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T) {
866 mac->ops.setup_fc = NULL;
867 mac->ops.setup_eee = NULL;
868 mac->ops.init_led_link_act = NULL;
869 }
870
871 return ret_val;
872 }
873
874 /**
875 * ixgbe_dmac_config_X550
876 * @hw: pointer to hardware structure
877 *
878 * Configure DMA coalescing. If enabling dmac, dmac is activated.
879 * When disabling dmac, the dmac enable bit is cleared.
880 **/
881 s32 ixgbe_dmac_config_X550(struct ixgbe_hw *hw)
882 {
883 u32 reg, high_pri_tc;
884
885 DEBUGFUNC("ixgbe_dmac_config_X550");
886
887 /* Disable DMA coalescing before configuring */
888 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
889 reg &= ~IXGBE_DMACR_DMAC_EN;
890 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
891
892 /* Disable DMA Coalescing if the watchdog timer is 0 */
893 if (!hw->mac.dmac_config.watchdog_timer)
894 goto out;
895
896 ixgbe_dmac_config_tcs_X550(hw);
897
898 /* Configure DMA Coalescing Control Register */
899 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
900
901 /* Set the watchdog timer in units of 40.96 usec */
902 reg &= ~IXGBE_DMACR_DMACWT_MASK;
903 reg |= (hw->mac.dmac_config.watchdog_timer * 100) / 4096;
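/* e.g. a 1000 usec watchdog becomes (1000 * 100) / 4096 = 24, i.e. roughly
 * 24 * 40.96 = 983 usec of actual coalescing delay.
 */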
904
905 reg &= ~IXGBE_DMACR_HIGH_PRI_TC_MASK;
906 /* If fcoe is enabled, set high priority traffic class */
907 if (hw->mac.dmac_config.fcoe_en) {
908 high_pri_tc = 1 << hw->mac.dmac_config.fcoe_tc;
909 reg |= ((high_pri_tc << IXGBE_DMACR_HIGH_PRI_TC_SHIFT) &
910 IXGBE_DMACR_HIGH_PRI_TC_MASK);
911 }
912 reg |= IXGBE_DMACR_EN_MNG_IND;
913
914 /* Enable DMA coalescing after configuration */
915 reg |= IXGBE_DMACR_DMAC_EN;
916 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
917
918 out:
919 return IXGBE_SUCCESS;
920 }
921
922 /**
923 * ixgbe_dmac_config_tcs_X550
924 * @hw: pointer to hardware structure
925 *
926 * Configure DMA coalescing threshold per TC. The dmac enable bit must
927 * be cleared before configuring.
928 **/
929 s32 ixgbe_dmac_config_tcs_X550(struct ixgbe_hw *hw)
930 {
931 u32 tc, reg, pb_headroom, rx_pb_size, maxframe_size_kb;
932
933 DEBUGFUNC("ixgbe_dmac_config_tcs_X550");
934
935 /* Configure DMA coalescing enabled */
936 switch (hw->mac.dmac_config.link_speed) {
937 case IXGBE_LINK_SPEED_10_FULL:
938 case IXGBE_LINK_SPEED_100_FULL:
939 pb_headroom = IXGBE_DMACRXT_100M;
940 break;
941 case IXGBE_LINK_SPEED_1GB_FULL:
942 pb_headroom = IXGBE_DMACRXT_1G;
943 break;
944 default:
945 pb_headroom = IXGBE_DMACRXT_10G;
946 break;
947 }
948
949 maxframe_size_kb = ((IXGBE_READ_REG(hw, IXGBE_MAXFRS) >>
950 IXGBE_MHADD_MFS_SHIFT) / 1024);
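/* e.g. a 9216-byte max frame size yields maxframe_size_kb = 9 (KB) */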
951
952 /* Set the per Rx packet buffer receive threshold */
953 for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) {
954 reg = IXGBE_READ_REG(hw, IXGBE_DMCTH(tc));
955 reg &= ~IXGBE_DMCTH_DMACRXT_MASK;
956
957 if (tc < hw->mac.dmac_config.num_tcs) {
958 /* Get Rx PB size */
959 rx_pb_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc));
960 rx_pb_size = (rx_pb_size & IXGBE_RXPBSIZE_MASK) >>
961 IXGBE_RXPBSIZE_SHIFT;
962
963 /* Calculate receive buffer threshold in kilobytes */
964 if (rx_pb_size > pb_headroom)
965 rx_pb_size = rx_pb_size - pb_headroom;
966 else
967 rx_pb_size = 0;
968
969 /* Minimum of MFS shall be set for DMCTH */
970 reg |= (rx_pb_size > maxframe_size_kb) ?
971 rx_pb_size : maxframe_size_kb;
972 }
973 IXGBE_WRITE_REG(hw, IXGBE_DMCTH(tc), reg);
974 }
975 return IXGBE_SUCCESS;
976 }
977
978 /**
979 * ixgbe_dmac_update_tcs_X550
980 * @hw: pointer to hardware structure
981 *
982 * Disables dmac, updates per TC settings, and then enables dmac.
983 **/
984 s32 ixgbe_dmac_update_tcs_X550(struct ixgbe_hw *hw)
985 {
986 u32 reg;
987
988 DEBUGFUNC("ixgbe_dmac_update_tcs_X550");
989
990 /* Disable DMA coalescing before configuring */
991 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
992 reg &= ~IXGBE_DMACR_DMAC_EN;
993 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
994
995 ixgbe_dmac_config_tcs_X550(hw);
996
997 /* Enable DMA coalescing after configuration */
998 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
999 reg |= IXGBE_DMACR_DMAC_EN;
1000 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
1001
1002 return IXGBE_SUCCESS;
1003 }
1004
1005 /**
1006 * ixgbe_init_eeprom_params_X550 - Initialize EEPROM params
1007 * @hw: pointer to hardware structure
1008 *
1009 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
1010 * ixgbe_hw struct in order to set up EEPROM access.
1011 **/
1012 s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
1013 {
1014 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
1015 u32 eec;
1016 u16 eeprom_size;
1017
1018 DEBUGFUNC("ixgbe_init_eeprom_params_X550");
1019
1020 if (eeprom->type == ixgbe_eeprom_uninitialized) {
1021 eeprom->semaphore_delay = 10;
1022 eeprom->type = ixgbe_flash;
1023
1024 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1025 eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
1026 IXGBE_EEC_SIZE_SHIFT);
1027 eeprom->word_size = 1 << (eeprom_size +
1028 IXGBE_EEPROM_WORD_SIZE_SHIFT);
1029
1030 DEBUGOUT2("Eeprom params: type = %d, size = %d\n",
1031 eeprom->type, eeprom->word_size);
1032 }
1033
1034 return IXGBE_SUCCESS;
1035 }
1036
1037 /**
1038 * ixgbe_set_source_address_pruning_X550 - Enable/disable source address pruning
1039 * @hw: pointer to hardware structure
1040 * @enable: enable or disable source address pruning
1041 * @pool: Rx pool to set source address pruning for
1042 **/
1043 void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw, bool enable,
1044 unsigned int pool)
1045 {
1046 u64 pfflp;
1047
1048 /* max rx pool is 63 */
1049 if (pool > 63)
1050 return;
1051
1052 pfflp = (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPL);
1053 pfflp |= (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPH) << 32;
1054
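/* PFFLPL/PFFLPH form one 64-bit pool mask, so e.g. pool 40 maps to bit 8
 * of IXGBE_PFFLPH (bit 40 of the combined value toggled below).
 */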
1055 if (enable)
1056 pfflp |= (1ULL << pool);
1057 else
1058 pfflp &= ~(1ULL << pool);
1059
1060 IXGBE_WRITE_REG(hw, IXGBE_PFFLPL, (u32)pfflp);
1061 IXGBE_WRITE_REG(hw, IXGBE_PFFLPH, (u32)(pfflp >> 32));
1062 }
1063
1064 /**
1065 * ixgbe_set_ethertype_anti_spoofing_X550 - Configure Ethertype anti-spoofing
1066 * @hw: pointer to hardware structure
1067 * @enable: enable or disable switch for Ethertype anti-spoofing
1068 * @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing
1069 *
1070 **/
1071 void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw,
1072 bool enable, int vf)
1073 {
1074 int vf_target_reg = vf >> 3;
1075 int vf_target_shift = vf % 8 + IXGBE_SPOOF_ETHERTYPEAS_SHIFT;
1076 u32 pfvfspoof;
1077
1078 DEBUGFUNC("ixgbe_set_ethertype_anti_spoofing_X550");
1079
1080 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
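/* e.g. for vf 35: vf_target_reg = 4 and the bit toggled below is
 * (35 % 8) + IXGBE_SPOOF_ETHERTYPEAS_SHIFT within PFVFSPOOF(4).
 */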
1081 if (enable)
1082 pfvfspoof |= (1 << vf_target_shift);
1083 else
1084 pfvfspoof &= ~(1 << vf_target_shift);
1085
1086 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
1087 }
1088
1089 /**
1090 * ixgbe_iosf_wait - Wait for IOSF command completion
1091 * @hw: pointer to hardware structure
1092 * @ctrl: pointer to location to receive final IOSF control value
1093 *
1094 * Returns failing status on timeout
1095 *
1096 * Note: ctrl can be NULL if the IOSF control register value is not needed
1097 **/
1098 static s32 ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl)
1099 {
1100 u32 i, command = 0;
1101
1102 /* Check every 10 usec to see if the address cycle completed.
1103 * The SB IOSF BUSY bit will clear when the operation is
1104 * complete
1105 */
1106 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
1107 command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL);
1108 if ((command & IXGBE_SB_IOSF_CTRL_BUSY) == 0)
1109 break;
1110 usec_delay(10);
1111 }
1112 if (ctrl)
1113 *ctrl = command;
1114 if (i == IXGBE_MDIO_COMMAND_TIMEOUT) {
1115 ERROR_REPORT1(IXGBE_ERROR_POLLING, "Wait timed out\n");
1116 return IXGBE_ERR_PHY;
1117 }
1118
1119 return IXGBE_SUCCESS;
1120 }
1121
1122 /**
1123 * ixgbe_write_iosf_sb_reg_x550 - Writes a value to specified register
1124 * of the IOSF device
1125 * @hw: pointer to hardware structure
1126 * @reg_addr: 32 bit PHY register to write
1127 * @device_type: 3 bit device type
1128 * @data: Data to write to the register
1129 **/
1130 s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
1131 u32 device_type, u32 data)
1132 {
1133 u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM;
1134 u32 command, error __unused;
1135 s32 ret;
1136
1137 ret = ixgbe_acquire_swfw_semaphore(hw, gssr);
1138 if (ret != IXGBE_SUCCESS)
1139 return ret;
1140
1141 ret = ixgbe_iosf_wait(hw, NULL);
1142 if (ret != IXGBE_SUCCESS)
1143 goto out;
1144
1145 command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
1146 (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
1147
1148 /* Write IOSF control register */
1149 IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
1150
1151 /* Write IOSF data register */
1152 IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA, data);
1153
1154 ret = ixgbe_iosf_wait(hw, &command);
1155
1156 if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
1157 error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
1158 IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
1159 ERROR_REPORT2(IXGBE_ERROR_POLLING,
1160 "Failed to write, error %x\n", error);
1161 ret = IXGBE_ERR_PHY;
1162 }
1163
1164 out:
1165 ixgbe_release_swfw_semaphore(hw, gssr);
1166 return ret;
1167 }
1168
1169 /**
1170 * ixgbe_read_iosf_sb_reg_x550 - Reads specified register of the IOSF device
1171 * @hw: pointer to hardware structure
1172 * @reg_addr: 32 bit PHY register to read
1173 * @device_type: 3 bit device type
1174 * @data: pointer to location to receive the read data
1175 **/
1176 s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
1177 u32 device_type, u32 *data)
1178 {
1179 u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM;
1180 u32 command, error __unused;
1181 s32 ret;
1182
1183 ret = ixgbe_acquire_swfw_semaphore(hw, gssr);
1184 if (ret != IXGBE_SUCCESS)
1185 return ret;
1186
1187 ret = ixgbe_iosf_wait(hw, NULL);
1188 if (ret != IXGBE_SUCCESS)
1189 goto out;
1190
1191 command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
1192 (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
1193
1194 /* Write IOSF control register */
1195 IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
1196
1197 ret = ixgbe_iosf_wait(hw, &command);
1198
1199 if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
1200 error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
1201 IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
1202 ERROR_REPORT2(IXGBE_ERROR_POLLING,
1203 "Failed to read, error %x\n", error);
1204 ret = IXGBE_ERR_PHY;
1205 }
1206
1207 if (ret == IXGBE_SUCCESS)
1208 *data = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA);
1209
1210 out:
1211 ixgbe_release_swfw_semaphore(hw, gssr);
1212 return ret;
1213 }
1214
1215 /**
1216 * ixgbe_get_phy_token - Get the token for shared phy access
1217 * @hw: Pointer to hardware structure
1218 */
1219
1220 s32 ixgbe_get_phy_token(struct ixgbe_hw *hw)
1221 {
1222 struct ixgbe_hic_phy_token_req token_cmd;
1223 s32 status;
1224
1225 token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
1226 token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
1227 token_cmd.hdr.cmd_or_resp.cmd_resv = 0;
1228 token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
1229 token_cmd.port_number = hw->bus.lan_id;
1230 token_cmd.command_type = FW_PHY_TOKEN_REQ;
1231 token_cmd.pad = 0;
1232 status = ixgbe_host_interface_command(hw, (u32 *)&token_cmd,
1233 sizeof(token_cmd),
1234 IXGBE_HI_COMMAND_TIMEOUT,
1235 true);
1236 if (status) {
1237 DEBUGOUT1("Issuing host interface command failed with Status = %d\n",
1238 status);
1239 return status;
1240 }
1241 if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
1242 return IXGBE_SUCCESS;
1243 if (token_cmd.hdr.cmd_or_resp.ret_status != FW_PHY_TOKEN_RETRY) {
1244 DEBUGOUT1("Host interface command returned 0x%08x , returning IXGBE_ERR_FW_RESP_INVALID\n",
1245 token_cmd.hdr.cmd_or_resp.ret_status);
1246 return IXGBE_ERR_FW_RESP_INVALID;
1247 }
1248
1249 DEBUGOUT("Returning IXGBE_ERR_TOKEN_RETRY\n");
1250 return IXGBE_ERR_TOKEN_RETRY;
1251 }
1252
1253 /**
1254 * ixgbe_put_phy_token - Put the token for shared phy access
1255 * @hw: Pointer to hardware structure
1256 */
1257
1258 s32 ixgbe_put_phy_token(struct ixgbe_hw *hw)
1259 {
1260 struct ixgbe_hic_phy_token_req token_cmd;
1261 s32 status;
1262
1263 token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
1264 token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
1265 token_cmd.hdr.cmd_or_resp.cmd_resv = 0;
1266 token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
1267 token_cmd.port_number = hw->bus.lan_id;
1268 token_cmd.command_type = FW_PHY_TOKEN_REL;
1269 token_cmd.pad = 0;
1270 status = ixgbe_host_interface_command(hw, (u32 *)&token_cmd,
1271 sizeof(token_cmd),
1272 IXGBE_HI_COMMAND_TIMEOUT,
1273 true);
1274 if (status)
1275 return status;
1276 if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
1277 return IXGBE_SUCCESS;
1278
1279 DEBUGOUT("Put PHY Token host interface command failed");
1280 return IXGBE_ERR_FW_RESP_INVALID;
1281 }
1282
1283 /**
1284 * ixgbe_disable_mdd_X550
1285 * @hw: pointer to hardware structure
1286 *
1287 * Disable malicious driver detection
1288 **/
1289 void ixgbe_disable_mdd_X550(struct ixgbe_hw *hw)
1290 {
1291 u32 reg;
1292
1293 DEBUGFUNC("ixgbe_disable_mdd_X550");
1294
1295 /* Disable MDD for TX DMA and interrupt */
1296 reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1297 reg &= ~(IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN);
1298 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
1299
1300 /* Disable MDD for RX and interrupt */
1301 reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
1302 reg &= ~(IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN);
1303 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg);
1304 }
1305
1306 /**
1307 * ixgbe_enable_mdd_X550
1308 * @hw: pointer to hardware structure
1309 *
1310 * Enable malicious driver detection
1311 **/
1312 void ixgbe_enable_mdd_X550(struct ixgbe_hw *hw)
1313 {
1314 u32 reg;
1315
1316 DEBUGFUNC("ixgbe_enable_mdd_X550");
1317
1318 /* Enable MDD for TX DMA and interrupt */
1319 reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1320 reg |= (IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN);
1321 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
1322
1323 /* Enable MDD for RX and interrupt */
1324 reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
1325 reg |= (IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN);
1326 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg);
1327 }
1328
1329 /**
1330 * ixgbe_restore_mdd_vf_X550
1331 * @hw: pointer to hardware structure
1332 * @vf: vf index
1333 *
1334 * Restore VF that was disabled during malicious driver detection event
1335 **/
1336 void ixgbe_restore_mdd_vf_X550(struct ixgbe_hw *hw, u32 vf)
1337 {
1338 u32 idx, reg, num_qs, start_q, bitmask;
1339
1340 DEBUGFUNC("ixgbe_restore_mdd_vf_X550");
1341
1342 /* Map VF to queues */
1343 reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
1344 switch (reg & IXGBE_MRQC_MRQE_MASK) {
1345 case IXGBE_MRQC_VMDQRT8TCEN:
1346 num_qs = 8; /* 16 VFs / pools */
1347 bitmask = 0x000000FF;
1348 break;
1349 case IXGBE_MRQC_VMDQRSS32EN:
1350 case IXGBE_MRQC_VMDQRT4TCEN:
1351 num_qs = 4; /* 32 VFs / pools */
1352 bitmask = 0x0000000F;
1353 break;
1354 default: /* 64 VFs / pools */
1355 num_qs = 2;
1356 bitmask = 0x00000003;
1357 break;
1358 }
1359 start_q = vf * num_qs;
1360
1361 /* Release vf's queues by clearing WQBR_TX and WQBR_RX (RW1C) */
1362 idx = start_q / 32;
1363 reg = 0;
1364 reg |= (bitmask << (start_q % 32));
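/* e.g. in 32-pool mode vf 10 owns queues 40-43, so idx = 1 and
 * reg = 0xF << 8 clears exactly those four WQBR bits.
 */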
1365 IXGBE_WRITE_REG(hw, IXGBE_WQBR_TX(idx), reg);
1366 IXGBE_WRITE_REG(hw, IXGBE_WQBR_RX(idx), reg);
1367 }
1368
1369 /**
1370 * ixgbe_mdd_event_X550
1371 * @hw: pointer to hardware structure
1372 * @vf_bitmap: vf bitmap of malicious vfs
1373 *
1374 * Handle malicious driver detection event.
1375 **/
1376 void ixgbe_mdd_event_X550(struct ixgbe_hw *hw, u32 *vf_bitmap)
1377 {
1378 u32 wqbr;
1379 u32 i, j, reg, q, shift, vf, idx;
1380
1381 DEBUGFUNC("ixgbe_mdd_event_X550");
1382
1383 /* figure out pool size for mapping to vf's */
1384 reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
1385 switch (reg & IXGBE_MRQC_MRQE_MASK) {
1386 case IXGBE_MRQC_VMDQRT8TCEN:
1387 shift = 3; /* 16 VFs / pools */
1388 break;
1389 case IXGBE_MRQC_VMDQRSS32EN:
1390 case IXGBE_MRQC_VMDQRT4TCEN:
1391 shift = 2; /* 32 VFs / pools */
1392 break;
1393 default:
1394 shift = 1; /* 64 VFs / pools */
1395 break;
1396 }
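/* e.g. with 32 pools (shift = 2), a hit on queue 45 maps to vf = 45 >> 2
 * = 11, which sets bit 11 of vf_bitmap[0] below.
 */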
1397
1398 /* Read WQBR_TX and WQBR_RX and check for malicious queues */
1399 for (i = 0; i < 4; i++) {
1400 wqbr = IXGBE_READ_REG(hw, IXGBE_WQBR_TX(i));
1401 wqbr |= IXGBE_READ_REG(hw, IXGBE_WQBR_RX(i));
1402
1403 if (!wqbr)
1404 continue;
1405
1406 /* Get malicious queue */
1407 for (j = 0; j < 32 && wqbr; j++) {
1408
1409 if (!(wqbr & (1 << j)))
1410 continue;
1411
1412 /* Get queue from bitmask */
1413 q = j + (i * 32);
1414
1415 /* Map queue to vf */
1416 vf = (q >> shift);
1417
1418 /* Set vf bit in vf_bitmap */
1419 idx = vf / 32;
1420 vf_bitmap[idx] |= (1 << (vf % 32));
1421 wqbr &= ~(1 << j);
1422 }
1423 }
1424 }
1425
1426 /**
1427 * ixgbe_get_media_type_X550em - Get media type
1428 * @hw: pointer to hardware structure
1429 *
1430 * Returns the media type (fiber, copper, backplane)
1431 */
1432 enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
1433 {
1434 enum ixgbe_media_type media_type;
1435
1436 DEBUGFUNC("ixgbe_get_media_type_X550em");
1437
1438 /* Detect if there is a copper PHY attached. */
1439 switch (hw->device_id) {
1440 case IXGBE_DEV_ID_X550EM_X_KR:
1441 case IXGBE_DEV_ID_X550EM_X_KX4:
1442 case IXGBE_DEV_ID_X550EM_X_XFI:
1443 case IXGBE_DEV_ID_X550EM_A_KR:
1444 case IXGBE_DEV_ID_X550EM_A_KR_L:
1445 media_type = ixgbe_media_type_backplane;
1446 break;
1447 case IXGBE_DEV_ID_X550EM_X_SFP:
1448 case IXGBE_DEV_ID_X550EM_A_SFP:
1449 case IXGBE_DEV_ID_X550EM_A_SFP_N:
1450 case IXGBE_DEV_ID_X550EM_A_QSFP:
1451 case IXGBE_DEV_ID_X550EM_A_QSFP_N:
1452 media_type = ixgbe_media_type_fiber;
1453 break;
1454 case IXGBE_DEV_ID_X550EM_X_1G_T:
1455 case IXGBE_DEV_ID_X550EM_X_10G_T:
1456 case IXGBE_DEV_ID_X550EM_A_10G_T:
1457 media_type = ixgbe_media_type_copper;
1458 break;
1459 case IXGBE_DEV_ID_X550EM_A_SGMII:
1460 case IXGBE_DEV_ID_X550EM_A_SGMII_L:
1461 media_type = ixgbe_media_type_backplane;
1462 hw->phy.type = ixgbe_phy_sgmii;
1463 break;
1464 case IXGBE_DEV_ID_X550EM_A_1G_T:
1465 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
1466 media_type = ixgbe_media_type_copper;
1467 break;
1468 default:
1469 media_type = ixgbe_media_type_unknown;
1470 break;
1471 }
1472 return media_type;
1473 }
1474
1475 /**
1476 * ixgbe_supported_sfp_modules_X550em - Check if SFP module type is supported
1477 * @hw: pointer to hardware structure
1478 * @linear: true if SFP module is linear
1479 */
1480 static s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear)
1481 {
1482 DEBUGFUNC("ixgbe_supported_sfp_modules_X550em");
1483
1484 switch (hw->phy.sfp_type) {
1485 case ixgbe_sfp_type_not_present:
1486 return IXGBE_ERR_SFP_NOT_PRESENT;
1487 case ixgbe_sfp_type_da_cu_core0:
1488 case ixgbe_sfp_type_da_cu_core1:
1489 *linear = true;
1490 break;
1491 case ixgbe_sfp_type_srlr_core0:
1492 case ixgbe_sfp_type_srlr_core1:
1493 case ixgbe_sfp_type_da_act_lmt_core0:
1494 case ixgbe_sfp_type_da_act_lmt_core1:
1495 case ixgbe_sfp_type_1g_sx_core0:
1496 case ixgbe_sfp_type_1g_sx_core1:
1497 case ixgbe_sfp_type_1g_lx_core0:
1498 case ixgbe_sfp_type_1g_lx_core1:
1499 *linear = false;
1500 break;
1501 case ixgbe_sfp_type_unknown:
1502 case ixgbe_sfp_type_1g_cu_core0:
1503 case ixgbe_sfp_type_1g_cu_core1:
1504 default:
1505 return IXGBE_ERR_SFP_NOT_SUPPORTED;
1506 }
1507
1508 return IXGBE_SUCCESS;
1509 }
1510
1511 /**
1512 * ixgbe_identify_sfp_module_X550em - Identifies SFP modules
1513 * @hw: pointer to hardware structure
1514 *
1515 * Searches for and identifies the SFP module and assigns appropriate PHY type.
1516 **/
1517 s32 ixgbe_identify_sfp_module_X550em(struct ixgbe_hw *hw)
1518 {
1519 s32 status;
1520 bool linear;
1521
1522 DEBUGFUNC("ixgbe_identify_sfp_module_X550em");
1523
1524 status = ixgbe_identify_module_generic(hw);
1525
1526 if (status != IXGBE_SUCCESS)
1527 return status;
1528
1529 /* Check if SFP module is supported */
1530 status = ixgbe_supported_sfp_modules_X550em(hw, &linear);
1531
1532 return status;
1533 }
1534
1535 /**
1536 * ixgbe_setup_sfp_modules_X550em - Setup MAC link ops
1537 * @hw: pointer to hardware structure
1538 */
1539 s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw)
1540 {
1541 s32 status;
1542 bool linear;
1543
1544 DEBUGFUNC("ixgbe_setup_sfp_modules_X550em");
1545
1546 /* Check if SFP module is supported */
1547 status = ixgbe_supported_sfp_modules_X550em(hw, &linear);
1548
1549 if (status != IXGBE_SUCCESS)
1550 return status;
1551
1552 ixgbe_init_mac_link_ops_X550em(hw);
1553 hw->phy.ops.reset = NULL;
1554
1555 return IXGBE_SUCCESS;
1556 }
1557
1558 /**
1559 * ixgbe_restart_an_internal_phy_x550em - restart autonegotiation for the
1560 * internal PHY
1561 * @hw: pointer to hardware structure
1562 **/
1563 static s32 ixgbe_restart_an_internal_phy_x550em(struct ixgbe_hw *hw)
1564 {
1565 s32 status;
1566 u32 link_ctrl;
1567
1568 /* Restart auto-negotiation. */
1569 status = hw->mac.ops.read_iosf_sb_reg(hw,
1570 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1571 IXGBE_SB_IOSF_TARGET_KR_PHY, &link_ctrl);
1572
1573 if (status) {
1574 DEBUGOUT("Auto-negotiation did not complete\n");
1575 return status;
1576 }
1577
1578 link_ctrl |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
1579 status = hw->mac.ops.write_iosf_sb_reg(hw,
1580 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1581 IXGBE_SB_IOSF_TARGET_KR_PHY, link_ctrl);
1582
1583 if (hw->mac.type == ixgbe_mac_X550EM_a) {
1584 u32 flx_mask_st20;
1585
1586 /* Indicate to FW that AN restart has been asserted */
1587 status = hw->mac.ops.read_iosf_sb_reg(hw,
1588 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1589 IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_mask_st20);
1590
1591 if (status) {
1592 DEBUGOUT("Auto-negotiation did not complete\n");
1593 return status;
1594 }
1595
1596 flx_mask_st20 |= IXGBE_KRM_PMD_FLX_MASK_ST20_FW_AN_RESTART;
1597 status = hw->mac.ops.write_iosf_sb_reg(hw,
1598 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1599 IXGBE_SB_IOSF_TARGET_KR_PHY, flx_mask_st20);
1600 }
1601
1602 return status;
1603 }
1604
1605 /**
1606 * ixgbe_setup_sgmii - Set up link for sgmii
1607 * @hw: pointer to hardware structure
1608 * @speed: new link speed
1609 * @autoneg_wait: true when waiting for completion is needed
1610 */
1611 static s32 ixgbe_setup_sgmii(struct ixgbe_hw *hw, ixgbe_link_speed speed,
1612 bool autoneg_wait)
1613 {
1614 struct ixgbe_mac_info *mac = &hw->mac;
1615 u32 lval, sval, flx_val;
1616 s32 rc;
1617
1618 rc = mac->ops.read_iosf_sb_reg(hw,
1619 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1620 IXGBE_SB_IOSF_TARGET_KR_PHY, &lval);
1621 if (rc)
1622 return rc;
1623
1624 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
1625 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
1626 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN;
1627 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN;
1628 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
1629 rc = mac->ops.write_iosf_sb_reg(hw,
1630 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1631 IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
1632 if (rc)
1633 return rc;
1634
1635 rc = mac->ops.read_iosf_sb_reg(hw,
1636 IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
1637 IXGBE_SB_IOSF_TARGET_KR_PHY, &sval);
1638 if (rc)
1639 return rc;
1640
1641 sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D;
1642 sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D;
1643 rc = mac->ops.write_iosf_sb_reg(hw,
1644 IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
1645 IXGBE_SB_IOSF_TARGET_KR_PHY, sval);
1646 if (rc)
1647 return rc;
1648
1649 rc = mac->ops.read_iosf_sb_reg(hw,
1650 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1651 IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val);
1652 if (rc)
1653 return rc;
1654
1655 flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
1656 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G;
1657 flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
1658 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
1659 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
1660
1661 rc = mac->ops.write_iosf_sb_reg(hw,
1662 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1663 IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val);
1664 if (rc)
1665 return rc;
1666
1667 rc = ixgbe_restart_an_internal_phy_x550em(hw);
1668 if (rc)
1669 return rc;
1670
1671 return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
1672 }
1673
1674 /**
1675 * ixgbe_setup_sgmii_fw - Set up link for internal PHY SGMII auto-negotiation
1676 * @hw: pointer to hardware structure
1677 * @speed: new link speed
1678 * @autoneg_wait: true when waiting for completion is needed
1679 */
1680 static s32 ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed,
1681 bool autoneg_wait)
1682 {
1683 struct ixgbe_mac_info *mac = &hw->mac;
1684 u32 lval, sval, flx_val;
1685 s32 rc;
1686
1687 rc = mac->ops.read_iosf_sb_reg(hw,
1688 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1689 IXGBE_SB_IOSF_TARGET_KR_PHY, &lval);
1690 if (rc)
1691 return rc;
1692
1693 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
1694 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
1695 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN;
1696 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN;
1697 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
1698 rc = mac->ops.write_iosf_sb_reg(hw,
1699 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1700 IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
1701 if (rc)
1702 return rc;
1703
1704 rc = mac->ops.read_iosf_sb_reg(hw,
1705 IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
1706 IXGBE_SB_IOSF_TARGET_KR_PHY, &sval);
1707 if (rc)
1708 return rc;
1709
1710 sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D;
1711 sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D;
1712 rc = mac->ops.write_iosf_sb_reg(hw,
1713 IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
1714 IXGBE_SB_IOSF_TARGET_KR_PHY, sval);
1715 if (rc)
1716 return rc;
1717
1718 rc = mac->ops.write_iosf_sb_reg(hw,
1719 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1720 IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
1721 if (rc)
1722 return rc;
1723
1724 rc = mac->ops.read_iosf_sb_reg(hw,
1725 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1726 IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val);
1727 if (rc)
1728 return rc;
1729
1730 flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
1731 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN;
1732 flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
1733 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
1734 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
1735
1736 rc = mac->ops.write_iosf_sb_reg(hw,
1737 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1738 IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val);
1739 if (rc)
1740 return rc;
1741
1742 rc = ixgbe_restart_an_internal_phy_x550em(hw);
1743
1744 return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
1745 }
1746
1747 /**
1748 * ixgbe_init_mac_link_ops_X550em - init mac link function pointers
1749 * @hw: pointer to hardware structure
1750 */
1751 void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
1752 {
1753 struct ixgbe_mac_info *mac = &hw->mac;
1754
1755 DEBUGFUNC("ixgbe_init_mac_link_ops_X550em");
1756
1757 switch (hw->mac.ops.get_media_type(hw)) {
1758 case ixgbe_media_type_fiber:
1759 /* CS4227 does not support autoneg, so disable the laser control
1760 * functions for SFP+ fiber
1761 */
1762 mac->ops.disable_tx_laser = NULL;
1763 mac->ops.enable_tx_laser = NULL;
1764 mac->ops.flap_tx_laser = NULL;
1765 mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber;
1766 mac->ops.set_rate_select_speed =
1767 ixgbe_set_soft_rate_select_speed;
1768
1769 if ((hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) ||
1770 (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP))
1771 mac->ops.setup_mac_link =
1772 ixgbe_setup_mac_link_sfp_x550a;
1773 else
1774 mac->ops.setup_mac_link =
1775 ixgbe_setup_mac_link_sfp_x550em;
1776 break;
1777 case ixgbe_media_type_copper:
1778 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T)
1779 break;
1780 if (hw->mac.type == ixgbe_mac_X550EM_a) {
1781 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
1782 hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) {
1783 mac->ops.setup_link = ixgbe_setup_sgmii_fw;
1784 mac->ops.check_link =
1785 ixgbe_check_mac_link_generic;
1786 } else {
1787 mac->ops.setup_link =
1788 ixgbe_setup_mac_link_t_X550em;
1789 }
1790 } else {
1791 mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em;
1792 mac->ops.check_link = ixgbe_check_link_t_X550em;
1793 }
1794 break;
1795 case ixgbe_media_type_backplane:
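/* The X550EM_a SGMII device IDs report backplane media but, per their
 * naming, run a 1G SGMII link, so they take the SGMII setup path instead
 * of the default backplane setup.
 */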
1796 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII ||
1797 hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L)
1798 mac->ops.setup_link = ixgbe_setup_sgmii;
1799 break;
1800 default:
1801 break;
1802 }
1803 }
1804
1805 /**
1806 * ixgbe_get_link_capabilities_X550em - Determines link capabilities
1807 * @hw: pointer to hardware structure
1808 * @speed: pointer to link speed
1809 * @autoneg: true when autoneg or autotry is enabled
1810 */
1811 s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
1812 ixgbe_link_speed *speed,
1813 bool *autoneg)
1814 {
1815 DEBUGFUNC("ixgbe_get_link_capabilities_X550em");
1816
1817
1818 if (hw->phy.type == ixgbe_phy_fw) {
1819 *autoneg = true;
1820 *speed = hw->phy.speeds_supported;
1821 return 0;
1822 }
1823
1824 /* SFP */
1825 if (hw->phy.media_type == ixgbe_media_type_fiber) {
1826
1827 /* CS4227 SFP must not enable auto-negotiation */
1828 *autoneg = false;
1829
1830 /* Check if 1G SFP module. */
1831 if (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
1832 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1
1833 || hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
1834 hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1) {
1835 *speed = IXGBE_LINK_SPEED_1GB_FULL;
1836 return IXGBE_SUCCESS;
1837 }
1838
1839 /* Link capabilities are based on SFP */
1840 if (hw->phy.multispeed_fiber)
1841 *speed = IXGBE_LINK_SPEED_10GB_FULL |
1842 IXGBE_LINK_SPEED_1GB_FULL;
1843 else
1844 *speed = IXGBE_LINK_SPEED_10GB_FULL;
1845 } else {
1846 *autoneg = true;
1847
1848 switch (hw->phy.type) {
1849 case ixgbe_phy_x550em_xfi:
1850 *speed = IXGBE_LINK_SPEED_1GB_FULL |
1851 IXGBE_LINK_SPEED_10GB_FULL;
1852 *autoneg = false;
1853 break;
1854 case ixgbe_phy_ext_1g_t:
1855 case ixgbe_phy_sgmii:
1856 *speed = IXGBE_LINK_SPEED_1GB_FULL;
1857 break;
1858 case ixgbe_phy_x550em_kr:
1859 if (hw->mac.type == ixgbe_mac_X550EM_a) {
1860 /* check different backplane modes */
1861 if (hw->phy.nw_mng_if_sel &
1862 IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G) {
1863 *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
1864 break;
1865 } else if (hw->device_id ==
1866 IXGBE_DEV_ID_X550EM_A_KR_L) {
1867 *speed = IXGBE_LINK_SPEED_1GB_FULL;
1868 break;
1869 }
1870 }
1871 *speed = IXGBE_LINK_SPEED_10GB_FULL |
1872 IXGBE_LINK_SPEED_1GB_FULL;
1873 break;
1874 default:
1875 *speed = IXGBE_LINK_SPEED_10GB_FULL |
1876 IXGBE_LINK_SPEED_1GB_FULL;
1877 break;
1878 }
1879 }
1880
1881 return IXGBE_SUCCESS;
1882 }
1883
1884 /**
1885 * ixgbe_get_lasi_ext_t_x550em - Determine external Base T PHY interrupt cause
1886 * @hw: pointer to hardware structure
1887 * @lsc: pointer to boolean flag which indicates whether external Base T
1888 * PHY interrupt is lsc
1889 *
1890 * Determine if external Base T PHY interrupt cause is high temperature
1891 * failure alarm or link status change.
1892 *
1893 * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature
1894 * failure alarm, else return PHY access status.
1895 */
1896 static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc)
1897 {
1898 u32 status;
1899 u16 reg;
1900
1901 *lsc = false;
1902
1903 /* Vendor alarm triggered */
1904 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG,
1905 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
1906 &reg);
1907
1908 if (status != IXGBE_SUCCESS ||
1909 !(reg & IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN))
1910 return status;
1911
1912 /* Vendor Auto-Neg alarm triggered or Global alarm 1 triggered */
1913 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_FLAG,
1914 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
1915 &reg);
1916
1917 if (status != IXGBE_SUCCESS ||
1918 !(reg & (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN |
1919 IXGBE_MDIO_GLOBAL_ALARM_1_INT)))
1920 return status;
1921
1922 /* Global alarm triggered */
1923 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_ALARM_1,
1924 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
1925 &reg);
1926
1927 if (status != IXGBE_SUCCESS)
1928 return status;
1929
1930 /* If high temperature failure, then return over temp error and exit */
1931 if (reg & IXGBE_MDIO_GLOBAL_ALM_1_HI_TMP_FAIL) {
1932 /* power down the PHY in case the PHY FW didn't already */
1933 ixgbe_set_copper_phy_power(hw, false);
1934 return IXGBE_ERR_OVERTEMP;
1935 } else if (reg & IXGBE_MDIO_GLOBAL_ALM_1_DEV_FAULT) {
1936 /* device fault alarm triggered */
1937 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_FAULT_MSG,
1938 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
1939 &reg);
1940
1941 if (status != IXGBE_SUCCESS)
1942 return status;
1943
1944 /* if device fault was due to high temp alarm handle and exit */
1945 if (reg == IXGBE_MDIO_GLOBAL_FAULT_MSG_HI_TMP) {
1946 /* power down the PHY in case the PHY FW didn't */
1947 ixgbe_set_copper_phy_power(hw, false);
1948 return IXGBE_ERR_OVERTEMP;
1949 }
1950 }
1951
1952 /* Vendor alarm 2 triggered */
1953 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG,
1954 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
1955
1956 if (status != IXGBE_SUCCESS ||
1957 !(reg & IXGBE_MDIO_GLOBAL_STD_ALM2_INT))
1958 return status;
1959
1960 /* link connect/disconnect event occurred */
1961 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM2,
1962 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
1963
1964 if (status != IXGBE_SUCCESS)
1965 return status;
1966
1967 /* Indicate LSC */
1968 if (reg & IXGBE_MDIO_AUTO_NEG_VEN_LSC)
1969 *lsc = true;
1970
1971 return IXGBE_SUCCESS;
1972 }
1973
1974 /**
1975 * ixgbe_enable_lasi_ext_t_x550em - Enable external Base T PHY interrupts
1976 * @hw: pointer to hardware structure
1977 *
1978 * Enable link status change and temperature failure alarm for the external
1979 * Base T PHY
1980 *
1981 * Returns PHY access status
1982 */
1983 static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw)
1984 {
1985 u32 status;
1986 u16 reg;
1987 bool lsc;
1988
1989 /* Clear interrupt flags */
1990 status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
1991
1992 /* Enable link status change alarm */
1993
1994 /* Enable the LASI interrupts on X552 devices to receive notifications
1995 * of the link configurations of the external PHY and correspondingly
1996 * support the configuration of the internal iXFI link, since iXFI does
1997 * not support auto-negotiation. This is not required for X553 devices
1998 * having KR support, which performs auto-negotiation and which is used
1999 * as the internal link to the external PHY. Hence adding a check here
2000 * to avoid enabling LASI interrupts for X553 devices.
2001 */
2002 if (hw->mac.type != ixgbe_mac_X550EM_a) {
2003 status = hw->phy.ops.read_reg(hw,
2004 IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
2005 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
2006
2007 if (status != IXGBE_SUCCESS)
2008 return status;
2009
2010 reg |= IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN;
2011
2012 status = hw->phy.ops.write_reg(hw,
2013 IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
2014 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg);
2015
2016 if (status != IXGBE_SUCCESS)
2017 return status;
2018 }
2019
2020 /* Enable high temperature failure and global fault alarms */
2021 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK,
2022 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2023 &reg);
2024
2025 if (status != IXGBE_SUCCESS)
2026 return status;
2027
2028 reg |= (IXGBE_MDIO_GLOBAL_INT_HI_TEMP_EN |
2029 IXGBE_MDIO_GLOBAL_INT_DEV_FAULT_EN);
2030
2031 status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK,
2032 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2033 reg);
2034
2035 if (status != IXGBE_SUCCESS)
2036 return status;
2037
2038 /* Enable vendor Auto-Neg alarm and Global Interrupt Mask 1 alarm */
2039 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK,
2040 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2041 &reg);
2042
2043 if (status != IXGBE_SUCCESS)
2044 return status;
2045
2046 reg |= (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN |
2047 IXGBE_MDIO_GLOBAL_ALARM_1_INT);
2048
2049 status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK,
2050 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2051 reg);
2052
2053 if (status != IXGBE_SUCCESS)
2054 return status;
2055
2056 /* Enable chip-wide vendor alarm */
2057 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK,
2058 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2059 &reg);
2060
2061 if (status != IXGBE_SUCCESS)
2062 return status;
2063
2064 reg |= IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN;
2065
2066 status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK,
2067 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2068 reg);
2069
2070 return status;
2071 }
2072
2073 /**
2074 * ixgbe_setup_kr_speed_x550em - Configure the KR PHY for link speed.
2075 * @hw: pointer to hardware structure
2076 * @speed: link speed
2077 *
2078 * Configures the integrated KR PHY.
2079 **/
2080 static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
2081 ixgbe_link_speed speed)
2082 {
2083 s32 status;
2084 u32 reg_val;
2085
2086 status = hw->mac.ops.read_iosf_sb_reg(hw,
2087 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
2088 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2089 if (status)
2090 return status;
2091
2092 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
2093 reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR |
2094 IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX);
2095
2096 /* Advertise 10G support. */
2097 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
2098 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR;
2099
2100 /* Advertise 1G support. */
2101 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
2102 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX;
2103
2104 status = hw->mac.ops.write_iosf_sb_reg(hw,
2105 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
2106 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2107
2108 if (hw->mac.type == ixgbe_mac_X550EM_a) {
2109 /* Set lane mode to KR auto negotiation */
2110 status = hw->mac.ops.read_iosf_sb_reg(hw,
2111 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2112 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2113
2114 if (status)
2115 return status;
2116
2117 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
2118 reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN;
2119 reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
2120 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
2121 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
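/* KR backplane auto-negotiation uses the Ethernet AN block (AN_EN), so
 * the clause-37 and SGMII mode bits are cleared here, in contrast to the
 * SGMII setup paths above.
 */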
2122
2123 status = hw->mac.ops.write_iosf_sb_reg(hw,
2124 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2125 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2126 }
2127
2128 return ixgbe_restart_an_internal_phy_x550em(hw);
2129 }
2130
2131 /**
2132 * ixgbe_reset_phy_fw - Reset firmware-controlled PHYs
2133 * @hw: pointer to hardware structure
2134 */
2135 static s32 ixgbe_reset_phy_fw(struct ixgbe_hw *hw)
2136 {
2137 u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 };
2138 s32 rc;
2139
2140 if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
2141 return IXGBE_SUCCESS;
2142
2143 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_PHY_SW_RESET, &store);
2144 if (rc)
2145 return rc;
2146 memset(store, 0, sizeof(store));
2147
2148 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_INIT_PHY, &store);
2149 if (rc)
2150 return rc;
2151
2152 return ixgbe_setup_fw_link(hw);
2153 }
2154
2155 /**
2156 * ixgbe_check_overtemp_fw - Check firmware-controlled PHYs for overtemp
2157 * @hw: pointer to hardware structure
2158 */
2159 static s32 ixgbe_check_overtemp_fw(struct ixgbe_hw *hw)
2160 {
2161 u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 };
2162 s32 rc;
2163
2164 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &store);
2165 if (rc)
2166 return rc;
2167
2168 if (store[0] & FW_PHY_ACT_GET_LINK_INFO_TEMP) {
2169 ixgbe_shutdown_fw_phy(hw);
2170 return IXGBE_ERR_OVERTEMP;
2171 }
2172 return IXGBE_SUCCESS;
2173 }
2174
2175 /**
2176 * ixgbe_read_mng_if_sel_x550em - Read NW_MNG_IF_SEL register
2177 * @hw: pointer to hardware structure
2178 *
2179 * Read the NW_MNG_IF_SEL register, save the field values, and check that the
2180 * fields are valid.
2181 **/
2182 static s32 ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw)
2183 {
2184 /* Save NW management interface connected on board. This is used
2185 * to determine internal PHY mode.
2186 */
2187 hw->phy.nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL);
2188
2189 /* If X552 (X550EM_a) and MDIO is connected to external PHY, then set
2190 * PHY address. This register field has only been used for X552.
2191 */
2192 if (hw->mac.type == ixgbe_mac_X550EM_a &&
2193 hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_MDIO_ACT) {
2194 hw->phy.addr = (hw->phy.nw_mng_if_sel &
2195 IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >>
2196 IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT;
2197 }
2198
2199 return IXGBE_SUCCESS;
2200 }
2201
2202 /**
2203 * ixgbe_init_phy_ops_X550em - PHY/SFP specific init
2204 * @hw: pointer to hardware structure
2205 *
2206 * Initialize any function pointers that were not able to be
2207 * set during init_shared_code because the PHY/SFP type was
2208 * not known. Perform the SFP init if necessary.
2209 */
2210 s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
2211 {
2212 struct ixgbe_phy_info *phy = &hw->phy;
2213 s32 ret_val;
2214
2215 DEBUGFUNC("ixgbe_init_phy_ops_X550em");
2216
2217 hw->mac.ops.set_lan_id(hw);
2218 ixgbe_read_mng_if_sel_x550em(hw);
2219
2220 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) {
2221 phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
2222 ixgbe_setup_mux_ctl(hw);
2223 phy->ops.identify_sfp = ixgbe_identify_sfp_module_X550em;
2224 }
2225
2226 switch (hw->device_id) {
2227 case IXGBE_DEV_ID_X550EM_A_1G_T:
2228 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
2229 phy->ops.read_reg_mdi = NULL;
2230 phy->ops.write_reg_mdi = NULL;
2231 hw->phy.ops.read_reg = NULL;
2232 hw->phy.ops.write_reg = NULL;
2233 phy->ops.check_overtemp = ixgbe_check_overtemp_fw;
2234 if (hw->bus.lan_id)
2235 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
2236 else
2237 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM;
2238
2239 break;
2240 case IXGBE_DEV_ID_X550EM_A_10G_T:
2241 case IXGBE_DEV_ID_X550EM_A_SFP:
2242 hw->phy.ops.read_reg = ixgbe_read_phy_reg_x550a;
2243 hw->phy.ops.write_reg = ixgbe_write_phy_reg_x550a;
2244 if (hw->bus.lan_id)
2245 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
2246 else
2247 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM;
2248 break;
2249 case IXGBE_DEV_ID_X550EM_X_SFP:
2250 /* set up for CS4227 usage */
2251 hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
2252 break;
2253 case IXGBE_DEV_ID_X550EM_X_1G_T:
2254 phy->ops.read_reg_mdi = NULL;
2255 phy->ops.write_reg_mdi = NULL;
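/* No break here: execution falls through to the default case, which
 * only breaks; nothing more needs to be set up for this device.
 */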
2256 default:
2257 break;
2258 }
2259
2260 /* Identify the PHY or SFP module */
2261 ret_val = phy->ops.identify(hw);
2262 if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED ||
2263 ret_val == IXGBE_ERR_PHY_ADDR_INVALID)
2264 return ret_val;
2265
2266 /* Setup function pointers based on detected hardware */
2267 ixgbe_init_mac_link_ops_X550em(hw);
2268 if (phy->sfp_type != ixgbe_sfp_type_unknown)
2269 phy->ops.reset = NULL;
2270
2271 /* Set function pointers based on PHY type */
2272 switch (hw->phy.type) {
2273 case ixgbe_phy_x550em_kx4:
2274 phy->ops.setup_link = NULL;
2275 phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
2276 phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
2277 break;
2278 case ixgbe_phy_x550em_kr:
2279 phy->ops.setup_link = ixgbe_setup_kr_x550em;
2280 phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
2281 phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
2282 break;
2283 case ixgbe_phy_ext_1g_t:
2284 /* link is managed by FW */
2285 phy->ops.setup_link = NULL;
2286 phy->ops.reset = NULL;
2287 break;
2288 case ixgbe_phy_x550em_xfi:
2289 /* link is managed by HW */
2290 phy->ops.setup_link = NULL;
2291 phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
2292 phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
2293 break;
2294 case ixgbe_phy_x550em_ext_t:
2295 /* If internal link mode is XFI, then setup iXFI internal link,
2296 * else setup KR now.
2297 */
2298 phy->ops.setup_internal_link =
2299 ixgbe_setup_internal_phy_t_x550em;
2300
2301 /* setup SW LPLU only for first revision of X550EM_x */
2302 if ((hw->mac.type == ixgbe_mac_X550EM_x) &&
2303 !(IXGBE_FUSES0_REV_MASK &
2304 IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0))))
2305 phy->ops.enter_lplu = ixgbe_enter_lplu_t_x550em;
2306
2307 phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em;
2308 phy->ops.reset = ixgbe_reset_phy_t_X550em;
2309 break;
2310 case ixgbe_phy_sgmii:
2311 phy->ops.setup_link = NULL;
2312 break;
2313 case ixgbe_phy_fw:
2314 phy->ops.setup_link = ixgbe_setup_fw_link;
2315 phy->ops.reset = ixgbe_reset_phy_fw;
2316 break;
2317 default:
2318 break;
2319 }
2320 return ret_val;
2321 }
2322
2323 /**
2324 * ixgbe_set_mdio_speed - Set MDIO clock speed
2325 * @hw: pointer to hardware structure
2326 */
2327 static void ixgbe_set_mdio_speed(struct ixgbe_hw *hw)
2328 {
2329 u32 hlreg0;
2330
2331 switch (hw->device_id) {
2332 case IXGBE_DEV_ID_X550EM_X_10G_T:
2333 case IXGBE_DEV_ID_X550EM_A_SGMII:
2334 case IXGBE_DEV_ID_X550EM_A_SGMII_L:
2335 case IXGBE_DEV_ID_X550EM_A_10G_T:
2336 case IXGBE_DEV_ID_X550EM_A_SFP:
2337 case IXGBE_DEV_ID_X550EM_A_QSFP:
2338 /* Config MDIO clock speed before the first MDIO PHY access */
2339 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2340 hlreg0 &= ~IXGBE_HLREG0_MDCSPD;
2341 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
2342 break;
2343 case IXGBE_DEV_ID_X550EM_A_1G_T:
2344 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
2345 /* Select fast MDIO clock speed for these devices */
2346 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2347 hlreg0 |= IXGBE_HLREG0_MDCSPD;
2348 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
2349 break;
2350 default:
2351 break;
2352 }
2353 }
2354
2355 /**
2356 * ixgbe_reset_hw_X550em - Perform hardware reset
2357 * @hw: pointer to hardware structure
2358 *
2359 * Resets the hardware by resetting the transmit and receive units, masks
2360 * and clears all interrupts, performs a PHY reset, and performs a link (MAC)
2361 * reset.
2362 */
2363 s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
2364 {
2365 ixgbe_link_speed link_speed;
2366 s32 status;
2367 u32 ctrl = 0;
2368 u32 i;
2369 bool link_up = false;
2370 u32 swfw_mask = hw->phy.phy_semaphore_mask;
2371
2372 DEBUGFUNC("ixgbe_reset_hw_X550em");
2373
2374 /* Call adapter stop to disable Tx/Rx and clear interrupts */
2375 status = hw->mac.ops.stop_adapter(hw);
2376 if (status != IXGBE_SUCCESS) {
2377 DEBUGOUT1("Failed to stop adapter, STATUS = %d\n", status);
2378 return status;
2379 }
2380 /* flush pending Tx transactions */
2381 ixgbe_clear_tx_pending(hw);
2382
2383 ixgbe_set_mdio_speed(hw);
2384
2385 /* PHY ops must be identified and initialized prior to reset */
2386 status = hw->phy.ops.init(hw);
2387
2388 if (status)
2389 DEBUGOUT1("Failed to initialize PHY ops, STATUS = %d\n",
2390 status);
2391
2392 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED ||
2393 status == IXGBE_ERR_PHY_ADDR_INVALID) {
2394 DEBUGOUT("Returning from reset HW due to PHY init failure\n");
2395 return status;
2396 }
2397
2398 /* start the external PHY */
2399 if (hw->phy.type == ixgbe_phy_x550em_ext_t) {
2400 status = ixgbe_init_ext_t_x550em(hw);
2401 if (status) {
2402 DEBUGOUT1("Failed to start the external PHY, STATUS = %d\n",
2403 status);
2404 return status;
2405 }
2406 }
2407
2408 /* Setup SFP module if there is one present. */
2409 if (hw->phy.sfp_setup_needed) {
2410 status = hw->mac.ops.setup_sfp(hw);
2411 hw->phy.sfp_setup_needed = false;
2412 }
2413
2414 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
2415 return status;
2416
2417 /* Reset PHY */
2418 if (!hw->phy.reset_disable && hw->phy.ops.reset) {
2419 if (hw->phy.ops.reset(hw) == IXGBE_ERR_OVERTEMP)
2420 return IXGBE_ERR_OVERTEMP;
2421 }
2422
2423 mac_reset_top:
2424 /* Issue global reset to the MAC. Needs to be SW reset if link is up.
2425 * If link reset is used when link is up, it might reset the PHY when
2426 * mng is using it. If link is down or the flag to force full link
2427 * reset is set, then perform link reset.
2428 */
2429 ctrl = IXGBE_CTRL_LNK_RST;
2430 if (!hw->force_full_reset) {
2431 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
2432 if (link_up)
2433 ctrl = IXGBE_CTRL_RST;
2434 }
2435
2436 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
2437 if (status != IXGBE_SUCCESS) {
2438 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
2439 "semaphore failed with %d", status);
2440 return IXGBE_ERR_SWFW_SYNC;
2441 }
2442 ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
2443 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
2444 IXGBE_WRITE_FLUSH(hw);
2445 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
2446
2447 /* Poll for reset bit to self-clear meaning reset is complete */
2448 for (i = 0; i < 10; i++) {
2449 usec_delay(1);
2450 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
2451 if (!(ctrl & IXGBE_CTRL_RST_MASK))
2452 break;
2453 }
2454
2455 if (ctrl & IXGBE_CTRL_RST_MASK) {
2456 status = IXGBE_ERR_RESET_FAILED;
2457 DEBUGOUT("Reset polling failed to complete.\n");
2458 }
2459
2460 msec_delay(50);
2461
2462 /* Double resets are required for recovery from certain error
2463 * conditions. Between resets, it is necessary to stall to
2464 * allow time for any pending HW events to complete.
2465 */
2466 if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
2467 hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
2468 goto mac_reset_top;
2469 }
2470
2471 /* Store the permanent mac address */
2472 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
2473
2474 /* Store MAC address from RAR0, clear receive address registers, and
2475 * clear the multicast table. Also reset num_rar_entries to 128,
2476 * since we modify this value when programming the SAN MAC address.
2477 */
2478 hw->mac.num_rar_entries = 128;
2479 hw->mac.ops.init_rx_addrs(hw);
2480
2481 ixgbe_set_mdio_speed(hw);
2482
2483 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
2484 ixgbe_setup_mux_ctl(hw);
2485
2486 if (status != IXGBE_SUCCESS)
2487 DEBUGOUT1("Reset HW failed, STATUS = %d\n", status);
2488
2489 return status;
2490 }
2491
2492 /**
2493 * ixgbe_init_ext_t_x550em - Start (unstall) the external Base T PHY.
2494 * @hw: pointer to hardware structure
2495 */
2496 s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
2497 {
2498 u32 status;
2499 u16 reg;
2500
2501 status = hw->phy.ops.read_reg(hw,
2502 IXGBE_MDIO_TX_VENDOR_ALARMS_3,
2503 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
2504 &reg);
2505
2506 if (status != IXGBE_SUCCESS)
2507 return status;
2508
2509 /* If PHY FW reset completed bit is set then this is the first
2510 * SW instance after a power on so the PHY FW must be un-stalled.
2511 */
2512 if (reg & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) {
2513 status = hw->phy.ops.read_reg(hw,
2514 IXGBE_MDIO_GLOBAL_RES_PR_10,
2515 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2516 &reg);
2517
2518 if (status != IXGBE_SUCCESS)
2519 return status;
2520
2521 reg &= ~IXGBE_MDIO_POWER_UP_STALL;
2522
2523 status = hw->phy.ops.write_reg(hw,
2524 IXGBE_MDIO_GLOBAL_RES_PR_10,
2525 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2526 reg);
2527
2528 if (status != IXGBE_SUCCESS)
2529 return status;
2530 }
2531
2532 return status;
2533 }
2534
2535 /**
2536 * ixgbe_setup_kr_x550em - Configure the KR PHY.
2537 * @hw: pointer to hardware structure
2538 **/
2539 s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
2540 {
2541 /* leave link alone for 2.5G */
2542 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL)
2543 return IXGBE_SUCCESS;
2544
2545 if (ixgbe_check_reset_blocked(hw))
2546 return 0;
2547
2548 return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised);
2549 }
2550
2551 /**
2552 * ixgbe_setup_mac_link_sfp_x550em - Set up the internal/external PHY for SFP
2553 * @hw: pointer to hardware structure
2554 * @speed: new link speed
2555 * @autoneg_wait_to_complete: unused
2556 *
2557 * Configure the external PHY and the integrated KR PHY for SFP support.
2558 **/
2559 s32 ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw,
2560 ixgbe_link_speed speed,
2561 bool autoneg_wait_to_complete)
2562 {
2563 s32 ret_val;
2564 u16 reg_slice, reg_val;
2565 bool setup_linear = false;
2566 UNREFERENCED_1PARAMETER(autoneg_wait_to_complete);
2567
2568 /* Check if SFP module is supported and linear */
2569 ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
2570
2571 /* If no SFP module is present, return success, since there is no
2572 * reason to configure the CS4227 and an SFP-not-present condition is
2573 * not treated as an error in the setup MAC link flow.
2574 */
2575 if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT)
2576 return IXGBE_SUCCESS;
2577
2578 if (ret_val != IXGBE_SUCCESS)
2579 return ret_val;
2580
2581 /* Configure internal PHY for KR/KX. */
2582 ixgbe_setup_kr_speed_x550em(hw, speed);
2583
2584 /* Configure CS4227 LINE side to proper mode. */
2585 reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB +
2586 (hw->bus.lan_id << 12);
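/* The value written to the CS4227 spare register encodes the EDC mode in
 * the upper bits (CX1 for linear/DAC modules, SR for optical) and appears
 * to use bit 0 as a "new value loaded" flag for the CS4227.
 */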
2587 if (setup_linear)
2588 reg_val = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
2589 else
2590 reg_val = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
2591 ret_val = hw->link.ops.write_link(hw, hw->link.addr, reg_slice,
2592 reg_val);
2593 return ret_val;
2594 }
2595
2596 /**
2597 * ixgbe_setup_sfi_x550a - Configure the internal PHY for native SFI mode
2598 * @hw: pointer to hardware structure
2599 * @speed: the link speed to force
2600 *
2601 * Configures the integrated PHY for native SFI mode. Used to connect the
2602 * internal PHY directly to an SFP cage, without autonegotiation.
2603 **/
2604 static s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
2605 {
2606 struct ixgbe_mac_info *mac = &hw->mac;
2607 s32 status;
2608 u32 reg_val;
2609
2610 /* Disable all AN and force speed to 10G Serial. */
2611 status = mac->ops.read_iosf_sb_reg(hw,
2612 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2613 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2614 if (status != IXGBE_SUCCESS)
2615 return status;
2616
2617 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
2618 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
2619 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
2620 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
2621
2622 /* Select forced link speed for internal PHY. */
2623 switch (*speed) {
2624 case IXGBE_LINK_SPEED_10GB_FULL:
2625 reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_10G;
2626 break;
2627 case IXGBE_LINK_SPEED_1GB_FULL:
2628 reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G;
2629 break;
2630 default:
2631 /* Other link speeds are not supported by internal PHY. */
2632 return IXGBE_ERR_LINK_SETUP;
2633 }
2634
2635 status = mac->ops.write_iosf_sb_reg(hw,
2636 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2637 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2638
2639 /* Toggle port SW reset by AN reset. */
2640 status = ixgbe_restart_an_internal_phy_x550em(hw);
2641
2642 return status;
2643 }
2644
2645 /**
2646 * ixgbe_setup_mac_link_sfp_x550a - Setup internal PHY for SFP
2647 * @hw: pointer to hardware structure
2648 * @speed: new link speed
2649 * @autoneg_wait_to_complete: unused
2650 *
2651 * Configure the integrated PHY for SFP support.
2652 **/
2653 s32 ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw,
2654 ixgbe_link_speed speed,
2655 bool autoneg_wait_to_complete)
2656 {
2657 s32 ret_val;
2658 u16 reg_phy_ext;
2659 bool setup_linear = false;
2660 u32 reg_slice, reg_phy_int, slice_offset;
2661
2662 UNREFERENCED_1PARAMETER(autoneg_wait_to_complete);
2663
2664 /* Check if SFP module is supported and linear */
2665 ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
2666
2667 /* If no SFP module is present, return success, since an SFP-not-present
2668 * condition is not treated as an error in the setup MAC link flow.
2669 */
2670 if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT)
2671 return IXGBE_SUCCESS;
2672
2673 if (ret_val != IXGBE_SUCCESS)
2674 return ret_val;
2675
2676 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) {
2677 /* Configure internal PHY for native SFI based on module type */
2678 ret_val = hw->mac.ops.read_iosf_sb_reg(hw,
2679 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2680 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_phy_int);
2681
2682 if (ret_val != IXGBE_SUCCESS)
2683 return ret_val;
2684
2685 reg_phy_int &= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA;
2686 if (!setup_linear)
2687 reg_phy_int |= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR;
2688
2689 ret_val = hw->mac.ops.write_iosf_sb_reg(hw,
2690 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2691 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_phy_int);
2692
2693 if (ret_val != IXGBE_SUCCESS)
2694 return ret_val;
2695
2696 /* Setup SFI internal link. */
2697 ret_val = ixgbe_setup_sfi_x550a(hw, &speed);
2698 } else {
2699 /* Configure internal PHY for KR/KX. */
2700 ixgbe_setup_kr_speed_x550em(hw, speed);
2701
2702 if (hw->phy.addr == 0x0 || hw->phy.addr == 0xFFFF) {
2703 /* Find Address */
2704 DEBUGOUT("Invalid NW_MNG_IF_SEL.MDIO_PHY_ADD value\n");
2705 return IXGBE_ERR_PHY_ADDR_INVALID;
2706 }
2707
2708 /* Get external PHY SKU id */
2709 ret_val = hw->phy.ops.read_reg(hw, IXGBE_CS4227_EFUSE_PDF_SKU,
2710 IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
2711
2712 if (ret_val != IXGBE_SUCCESS)
2713 return ret_val;
2714
2715 /* When configuring quad port CS4223, the MAC instance is part
2716 * of the slice offset.
2717 */
2718 if (reg_phy_ext == IXGBE_CS4223_SKU_ID)
2719 slice_offset = (hw->bus.lan_id +
2720 (hw->bus.instance_id << 1)) << 12;
2721 else
2722 slice_offset = hw->bus.lan_id << 12;
2723
2724 /* Configure CS4227/CS4223 LINE side to proper mode. */
2725 reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + slice_offset;
2726
2727 ret_val = hw->phy.ops.read_reg(hw, reg_slice,
2728 IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
2729
2730 if (ret_val != IXGBE_SUCCESS)
2731 return ret_val;
2732
2733 reg_phy_ext &= ~((IXGBE_CS4227_EDC_MODE_CX1 << 1) |
2734 (IXGBE_CS4227_EDC_MODE_SR << 1));
2735
2736 if (setup_linear)
2737 reg_phy_ext |= (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
2738 else
2739 reg_phy_ext |= (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
2740 ret_val = hw->phy.ops.write_reg(hw, reg_slice,
2741 IXGBE_MDIO_ZERO_DEV_TYPE, reg_phy_ext);
2742
2743 /* Flush previous write with a read */
2744 ret_val = hw->phy.ops.read_reg(hw, reg_slice,
2745 IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
2746 }
2747 return ret_val;
2748 }
2749
2750 /**
2751 * ixgbe_setup_ixfi_x550em_x - MAC specific iXFI configuration
2752 * @hw: pointer to hardware structure
2753 *
2754 * iXFI configuration needed for ixgbe_mac_X550EM_x devices.
2755 **/
2756 static s32 ixgbe_setup_ixfi_x550em_x(struct ixgbe_hw *hw)
2757 {
2758 struct ixgbe_mac_info *mac = &hw->mac;
2759 s32 status;
2760 u32 reg_val;
2761
2762 /* Disable training protocol FSM. */
2763 status = mac->ops.read_iosf_sb_reg(hw,
2764 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
2765 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2766 if (status != IXGBE_SUCCESS)
2767 return status;
2768 reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL;
2769 status = mac->ops.write_iosf_sb_reg(hw,
2770 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
2771 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2772 if (status != IXGBE_SUCCESS)
2773 return status;
2774
2775 /* Disable Flex from training TXFFE. */
2776 status = mac->ops.read_iosf_sb_reg(hw,
2777 IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
2778 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2779 if (status != IXGBE_SUCCESS)
2780 return status;
2781 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
2782 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
2783 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
2784 status = mac->ops.write_iosf_sb_reg(hw,
2785 IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
2786 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2787 if (status != IXGBE_SUCCESS)
2788 return status;
2789 status = mac->ops.read_iosf_sb_reg(hw,
2790 IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
2791 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2792 if (status != IXGBE_SUCCESS)
2793 return status;
2794 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
2795 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
2796 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
2797 status = mac->ops.write_iosf_sb_reg(hw,
2798 IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
2799 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2800 if (status != IXGBE_SUCCESS)
2801 return status;
2802
2803 /* Enable override for coefficients. */
2804 status = mac->ops.read_iosf_sb_reg(hw,
2805 IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
2806 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2807 if (status != IXGBE_SUCCESS)
2808 return status;
2809 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN;
2810 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN;
2811 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN;
2812 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN;
2813 status = mac->ops.write_iosf_sb_reg(hw,
2814 IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
2815 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2816 return status;
2817 }
2818
2819 /**
2820 * ixgbe_setup_ixfi_x550em - Configure the KR PHY for iXFI mode.
2821 * @hw: pointer to hardware structure
2822 * @speed: the link speed to force
2823 *
2824 * Configures the integrated KR PHY to use iXFI mode. Used to connect an
2825 * internal and external PHY at a specific speed, without autonegotiation.
2826 **/
2827 static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
2828 {
2829 struct ixgbe_mac_info *mac = &hw->mac;
2830 s32 status;
2831 u32 reg_val;
2832
2833 /* iXFI is only supported with X552 */
2834 if (mac->type != ixgbe_mac_X550EM_x)
2835 return IXGBE_ERR_LINK_SETUP;
2836
2837 /* Disable AN and force speed to 10G Serial. */
2838 status = mac->ops.read_iosf_sb_reg(hw,
2839 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
2840 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2841 if (status != IXGBE_SUCCESS)
2842 return status;
2843
2844 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
2845 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
2846
2847 /* Select forced link speed for internal PHY. */
2848 switch (*speed) {
2849 case IXGBE_LINK_SPEED_10GB_FULL:
2850 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;
2851 break;
2852 case IXGBE_LINK_SPEED_1GB_FULL:
2853 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
2854 break;
2855 default:
2856 /* Other link speeds are not supported by internal KR PHY. */
2857 return IXGBE_ERR_LINK_SETUP;
2858 }
2859
2860 status = mac->ops.write_iosf_sb_reg(hw,
2861 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
2862 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2863 if (status != IXGBE_SUCCESS)
2864 return status;
2865
2866 /* Additional configuration needed for x550em_x */
2867 if (hw->mac.type == ixgbe_mac_X550EM_x) {
2868 status = ixgbe_setup_ixfi_x550em_x(hw);
2869 if (status != IXGBE_SUCCESS)
2870 return status;
2871 }
2872
2873 /* Toggle port SW reset by AN reset. */
2874 status = ixgbe_restart_an_internal_phy_x550em(hw);
2875
2876 return status;
2877 }
2878
2879 /**
2880 * ixgbe_ext_phy_t_x550em_get_link - Get ext phy link status
2881 * @hw: address of hardware structure
2882 * @link_up: address of boolean to indicate link status
2883 *
2884 * Returns error code if unable to get link status.
2885 */
2886 static s32 ixgbe_ext_phy_t_x550em_get_link(struct ixgbe_hw *hw, bool *link_up)
2887 {
2888 u32 ret;
2889 u16 autoneg_status;
2890
2891 *link_up = false;
2892
2893 /* The link status bit latches low, so read it twice back to back; the second read reflects the current status */
2894 ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
2895 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
2896 &autoneg_status);
2897 if (ret != IXGBE_SUCCESS)
2898 return ret;
2899
2900 ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
2901 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
2902 &autoneg_status);
2903 if (ret != IXGBE_SUCCESS)
2904 return ret;
2905
2906 *link_up = !!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS);
2907
2908 return IXGBE_SUCCESS;
2909 }
2910
2911 /**
2912 * ixgbe_setup_internal_phy_t_x550em - Configure KR PHY to X557 link
2913 * @hw: pointer to hardware structure
2914 *
2915 * Configures the link between the integrated KR PHY and the external X557 PHY
2916 * The driver will call this function when it gets a link status change
2917 * interrupt from the X557 PHY. This function configures the link speed
2918 * between the PHYs to match the link speed of the BASE-T link.
2919 *
2920 * A return of a non-zero value indicates an error, and the base driver should
2921 * not report link up.
2922 */
2923 s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw)
2924 {
2925 ixgbe_link_speed force_speed;
2926 bool link_up;
2927 u32 status;
2928 u16 speed;
2929
2930 if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
2931 return IXGBE_ERR_CONFIG;
2932
2933 if (hw->mac.type == ixgbe_mac_X550EM_x &&
2934 !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
2935 /* If link is down, there is no setup necessary so return */
2936 status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
2937 if (status != IXGBE_SUCCESS)
2938 return status;
2939
2940 if (!link_up)
2941 return IXGBE_SUCCESS;
2942
2943 status = hw->phy.ops.read_reg(hw,
2944 IXGBE_MDIO_AUTO_NEG_VENDOR_STAT,
2945 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
2946 &speed);
2947 if (status != IXGBE_SUCCESS)
2948 return status;
2949
2950 /* If link is still down - no setup is required so return */
2951 status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
2952 if (status != IXGBE_SUCCESS)
2953 return status;
2954 if (!link_up)
2955 return IXGBE_SUCCESS;
2956
2957 /* clear everything but the speed and duplex bits */
2958 speed &= IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK;
2959
2960 switch (speed) {
2961 case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL:
2962 force_speed = IXGBE_LINK_SPEED_10GB_FULL;
2963 break;
2964 case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL:
2965 force_speed = IXGBE_LINK_SPEED_1GB_FULL;
2966 break;
2967 default:
2968 /* Internal PHY does not support anything else */
2969 return IXGBE_ERR_INVALID_LINK_SETTINGS;
2970 }
2971
2972 return ixgbe_setup_ixfi_x550em(hw, &force_speed);
2973 } else {
2974 speed = IXGBE_LINK_SPEED_10GB_FULL |
2975 IXGBE_LINK_SPEED_1GB_FULL;
2976 return ixgbe_setup_kr_speed_x550em(hw, speed);
2977 }
2978 }
2979
2980 /**
2981 * ixgbe_setup_phy_loopback_x550em - Configure the KR PHY for loopback.
2982 * @hw: pointer to hardware structure
2983 *
2984 * Configures the integrated KR PHY to use internal loopback mode.
2985 **/
2986 s32 ixgbe_setup_phy_loopback_x550em(struct ixgbe_hw *hw)
2987 {
2988 s32 status;
2989 u32 reg_val;
2990
2991 /* Disable AN and force speed to 10G Serial. */
2992 status = hw->mac.ops.read_iosf_sb_reg(hw,
2993 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
2994 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2995 if (status != IXGBE_SUCCESS)
2996 return status;
2997 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
2998 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
2999 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;
3000 status = hw->mac.ops.write_iosf_sb_reg(hw,
3001 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
3002 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3003 if (status != IXGBE_SUCCESS)
3004 return status;
3005
3006 /* Set near-end loopback clocks. */
3007 status = hw->mac.ops.read_iosf_sb_reg(hw,
3008 IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id),
3009 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3010 if (status != IXGBE_SUCCESS)
3011 return status;
3012 reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B;
3013 reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS;
3014 status = hw->mac.ops.write_iosf_sb_reg(hw,
3015 IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id),
3016 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3017 if (status != IXGBE_SUCCESS)
3018 return status;
3019
3020 /* Set loopback enable. */
3021 status = hw->mac.ops.read_iosf_sb_reg(hw,
3022 IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id),
3023 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3024 if (status != IXGBE_SUCCESS)
3025 return status;
3026 reg_val |= IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK;
3027 status = hw->mac.ops.write_iosf_sb_reg(hw,
3028 IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id),
3029 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3030 if (status != IXGBE_SUCCESS)
3031 return status;
3032
3033 /* Training bypass. */
3034 status = hw->mac.ops.read_iosf_sb_reg(hw,
3035 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
3036 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3037 if (status != IXGBE_SUCCESS)
3038 return status;
3039 reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS;
3040 status = hw->mac.ops.write_iosf_sb_reg(hw,
3041 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
3042 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3043
3044 return status;
3045 }
3046
3047 /**
3048 * ixgbe_read_ee_hostif_X550 - Read EEPROM word using a host interface command;
3049 * the needed semaphores are acquired and released within this function.
3050 * @hw: pointer to hardware structure
3051 * @offset: offset of word in the EEPROM to read
3052 * @data: word read from the EEPROM
3053 *
3054 * Reads a 16 bit word from the EEPROM using the hostif.
3055 **/
3056 s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
3057 {
3058 const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM;
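/* The shadow RAM read goes through the host interface, which is shared
 * with manageability firmware, so both the SW/MNG and EEPROM semaphores
 * are taken around the command.
 */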
3059 struct ixgbe_hic_read_shadow_ram buffer;
3060 s32 status;
3061
3062 DEBUGFUNC("ixgbe_read_ee_hostif_X550");
3063 buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
3064 buffer.hdr.req.buf_lenh = 0;
3065 buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
3066 buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
3067
3068 /* convert offset from words to bytes */
3069 buffer.address = IXGBE_CPU_TO_BE32(offset * 2);
3070 /* one word */
3071 buffer.length = IXGBE_CPU_TO_BE16(sizeof(u16));
3072 buffer.pad2 = 0;
3073 buffer.data = 0;
3074 buffer.pad3 = 0;
3075
3076 status = hw->mac.ops.acquire_swfw_sync(hw, mask);
3077 if (status)
3078 return status;
3079
3080 status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer),
3081 IXGBE_HI_COMMAND_TIMEOUT);
3082 if (!status) {
3083 *data = (u16)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG,
3084 FW_NVM_DATA_OFFSET);
3085 }
3086
3087 hw->mac.ops.release_swfw_sync(hw, mask);
3088 return status;
3089 }
3090
3091 /**
3092 * ixgbe_read_ee_hostif_buffer_X550 - Read EEPROM word(s) using hostif
3093 * @hw: pointer to hardware structure
3094 * @offset: offset of word in the EEPROM to read
3095 * @words: number of words
3096 * @data: word(s) read from the EEPROM
3097 *
3098 * Reads one or more 16-bit words from the EEPROM using the hostif.
3099 **/
3100 s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
3101 u16 offset, u16 words, u16 *data)
3102 {
3103 const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM;
3104 struct ixgbe_hic_read_shadow_ram buffer;
3105 u32 current_word = 0;
3106 u16 words_to_read;
3107 s32 status;
3108 u32 i;
3109
3110 DEBUGFUNC("ixgbe_read_ee_hostif_buffer_X550");
3111
3112 /* Take semaphore for the entire operation. */
3113 status = hw->mac.ops.acquire_swfw_sync(hw, mask);
3114 if (status) {
3115 DEBUGOUT("EEPROM read buffer - semaphore failed\n");
3116 return status;
3117 }
3118
3119 while (words) {
3120 if (words > FW_MAX_READ_BUFFER_SIZE / 2)
3121 words_to_read = FW_MAX_READ_BUFFER_SIZE / 2;
3122 else
3123 words_to_read = words;
3124
3125 buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
3126 buffer.hdr.req.buf_lenh = 0;
3127 buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
3128 buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
3129
3130 /* convert offset from words to bytes */
3131 buffer.address = IXGBE_CPU_TO_BE32((offset + current_word) * 2);
3132 buffer.length = IXGBE_CPU_TO_BE16(words_to_read * 2);
3133 buffer.pad2 = 0;
3134 buffer.data = 0;
3135 buffer.pad3 = 0;
3136
3137 status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer),
3138 IXGBE_HI_COMMAND_TIMEOUT);
3139
3140 if (status) {
3141 DEBUGOUT("Host interface command failed\n");
3142 goto out;
3143 }
3144
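/* Each 32-bit FLEX_MNG register holds two 16-bit EEPROM words, so the
 * loop below consumes the low half and then the high half before moving
 * on to the next register.
 */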
3145 for (i = 0; i < words_to_read; i++) {
3146 u32 reg = IXGBE_FLEX_MNG + (FW_NVM_DATA_OFFSET << 2) +
3147 2 * i;
3148 u32 value = IXGBE_READ_REG(hw, reg);
3149
3150 data[current_word] = (u16)(value & 0xffff);
3151 current_word++;
3152 i++;
3153 if (i < words_to_read) {
3154 value >>= 16;
3155 data[current_word] = (u16)(value & 0xffff);
3156 current_word++;
3157 }
3158 }
3159 words -= words_to_read;
3160 }
3161
3162 out:
3163 hw->mac.ops.release_swfw_sync(hw, mask);
3164 return status;
3165 }
3166
3167 /**
3168 * ixgbe_write_ee_hostif_data_X550 - Write EEPROM word using hostif
3169 * @hw: pointer to hardware structure
3170 * @offset: offset of word in the EEPROM to write
3171 * @data: word to write to the EEPROM
3172 *
3173 * Write a 16 bit word to the EEPROM using the hostif.
3174 **/
3175 s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
3176 u16 data)
3177 {
3178 s32 status;
3179 struct ixgbe_hic_write_shadow_ram buffer;
3180
3181 DEBUGFUNC("ixgbe_write_ee_hostif_data_X550");
3182
3183 buffer.hdr.req.cmd = FW_WRITE_SHADOW_RAM_CMD;
3184 buffer.hdr.req.buf_lenh = 0;
3185 buffer.hdr.req.buf_lenl = FW_WRITE_SHADOW_RAM_LEN;
3186 buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
3187
3188 /* one word */
3189 buffer.length = IXGBE_CPU_TO_BE16(sizeof(u16));
3190 buffer.data = data;
3191 buffer.address = IXGBE_CPU_TO_BE32(offset * 2);
3192
3193 status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
3194 sizeof(buffer),
3195 IXGBE_HI_COMMAND_TIMEOUT, true);
3196 if (status != IXGBE_SUCCESS) {
3197 DEBUGOUT2("for offset %04x failed with status %d\n",
3198 offset, status);
3199 return status;
3200 }
3201
3202 if (buffer.hdr.rsp.buf_lenh_status != FW_CEM_RESP_STATUS_SUCCESS) {
3203 DEBUGOUT2("for offset %04x host interface return status %02x\n",
3204 offset, buffer.hdr.rsp.buf_lenh_status);
3205 return IXGBE_ERR_HOST_INTERFACE_COMMAND;
3206 }
3207
3208 return status;
3209 }
3210
3211 /**
3212 * ixgbe_write_ee_hostif_X550 - Write EEPROM word using hostif
3213 * @hw: pointer to hardware structure
3214 * @offset: offset of word in the EEPROM to write
3215 * @data: word to write to the EEPROM
3216 *
3217 * Write a 16 bit word to the EEPROM using the hostif.
3218 **/
3219 s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset,
3220 u16 data)
3221 {
3222 s32 status = IXGBE_SUCCESS;
3223
3224 DEBUGFUNC("ixgbe_write_ee_hostif_X550");
3225
3226 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
3227 IXGBE_SUCCESS) {
3228 status = ixgbe_write_ee_hostif_data_X550(hw, offset, data);
3229 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
3230 } else {
3231 DEBUGOUT("write ee hostif failed to get semaphore");
3232 status = IXGBE_ERR_SWFW_SYNC;
3233 }
3234
3235 return status;
3236 }
3237
3238 /**
3239 * ixgbe_write_ee_hostif_buffer_X550 - Write EEPROM word(s) using hostif
3240 * @hw: pointer to hardware structure
3241 * @offset: offset of word in the EEPROM to write
3242 * @words: number of words
3243 * @data: word(s) to write to the EEPROM
3244 *
3245 * Writes one or more 16-bit words to the EEPROM using the hostif.
3246 **/
3247 s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
3248 u16 offset, u16 words, u16 *data)
3249 {
3250 s32 status = IXGBE_SUCCESS;
3251 u32 i = 0;
3252
3253 DEBUGFUNC("ixgbe_write_ee_hostif_buffer_X550");
3254
3255 /* Take semaphore for the entire operation. */
3256 status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
3257 if (status != IXGBE_SUCCESS) {
3258 DEBUGOUT("EEPROM write buffer - semaphore failed\n");
3259 goto out;
3260 }
3261
3262 for (i = 0; i < words; i++) {
3263 status = ixgbe_write_ee_hostif_data_X550(hw, offset + i,
3264 data[i]);
3265
3266 if (status != IXGBE_SUCCESS) {
3267 DEBUGOUT("Eeprom buffered write failed\n");
3268 break;
3269 }
3270 }
3271
3272 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
3273 out:
3274
3275 return status;
3276 }
3277
3278 /**
3279 * ixgbe_checksum_ptr_x550 - Checksum one pointer region
3280 * @hw: pointer to hardware structure
3281 * @ptr: pointer offset in eeprom
3282 * @size: size of the section pointed to by ptr; if 0, the first word is used as the size
3283 * @csum: address of checksum to update
3284 * @buffer: pointer to buffer containing calculated checksum
3285 * @buffer_size: size of buffer
3286 *
3287 * Returns error status for any failure
3288 */
3289 static s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr,
3290 u16 size, u16 *csum, u16 *buffer,
3291 u32 buffer_size)
3292 {
3293 u16 buf[256];
3294 s32 status;
3295 u16 length, bufsz, i, start;
3296 u16 *local_buffer;
3297
3298 bufsz = sizeof(buf) / sizeof(buf[0]);
3299
3300 /* Read a chunk at the pointer location */
3301 if (!buffer) {
3302 status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr, bufsz, buf);
3303 if (status) {
3304 DEBUGOUT("Failed to read EEPROM image\n");
3305 return status;
3306 }
3307 local_buffer = buf;
3308 } else {
3309 if (buffer_size < ptr)
3310 return IXGBE_ERR_PARAM;
3311 local_buffer = &buffer[ptr];
3312 }
3313
3314 if (size) {
3315 start = 0;
3316 length = size;
3317 } else {
3318 start = 1;
3319 length = local_buffer[0];
3320
3321 /* Skip pointer section if length is invalid. */
3322 if (length == 0xFFFF || length == 0 ||
3323 (ptr + length) >= hw->eeprom.word_size)
3324 return IXGBE_SUCCESS;
3325 }
3326
3327 if (buffer && ((u32)start + (u32)length > buffer_size))
3328 return IXGBE_ERR_PARAM;
3329
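/* Walk the section one word at a time; when reading directly from the
 * EEPROM (no caller-supplied buffer), refill buf in bufsz-word chunks as
 * the index wraps.
 */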
3330 for (i = start; length; i++, length--) {
3331 if (i == bufsz && !buffer) {
3332 ptr += bufsz;
3333 i = 0;
3334 if (length < bufsz)
3335 bufsz = length;
3336
3337 /* Read a chunk at the pointer location */
3338 status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr,
3339 bufsz, buf);
3340 if (status) {
3341 DEBUGOUT("Failed to read EEPROM image\n");
3342 return status;
3343 }
3344 }
3345 *csum += local_buffer[i];
3346 }
3347 return IXGBE_SUCCESS;
3348 }
3349
3350 /**
3351 * ixgbe_calc_checksum_X550 - Calculates and returns the checksum
3352 * @hw: pointer to hardware structure
3353 * @buffer: pointer to buffer containing calculated checksum
3354 * @buffer_size: size of buffer
3355 *
3356 * Returns a negative error code on error, or the 16-bit checksum
3357 **/
3358 s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, u32 buffer_size)
3359 {
3360 u16 eeprom_ptrs[IXGBE_EEPROM_LAST_WORD + 1];
3361 u16 *local_buffer;
3362 s32 status;
3363 u16 checksum = 0;
3364 u16 pointer, i, size;
3365
3366 DEBUGFUNC("ixgbe_calc_eeprom_checksum_X550");
3367
3368 hw->eeprom.ops.init_params(hw);
3369
3370 if (!buffer) {
3371 /* Read pointer area */
3372 status = ixgbe_read_ee_hostif_buffer_X550(hw, 0,
3373 IXGBE_EEPROM_LAST_WORD + 1,
3374 eeprom_ptrs);
3375 if (status) {
3376 DEBUGOUT("Failed to read EEPROM image\n");
3377 return status;
3378 }
3379 local_buffer = eeprom_ptrs;
3380 } else {
3381 if (buffer_size < IXGBE_EEPROM_LAST_WORD)
3382 return IXGBE_ERR_PARAM;
3383 local_buffer = buffer;
3384 }
3385
3386 /*
3387 * For X550 hardware include 0x0-0x41 in the checksum, skip the
3388 * checksum word itself
3389 */
3390 for (i = 0; i <= IXGBE_EEPROM_LAST_WORD; i++)
3391 if (i != IXGBE_EEPROM_CHECKSUM)
3392 checksum += local_buffer[i];
3393
3394 /*
3395 * Include all data from pointers 0x3, 0x6-0xE. This excludes the
3396 * FW, PHY module, and PCIe Expansion/Option ROM pointers.
3397 */
3398 for (i = IXGBE_PCIE_ANALOG_PTR_X550; i < IXGBE_FW_PTR; i++) {
3399 if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR)
3400 continue;
3401
3402 pointer = local_buffer[i];
3403
3404 /* Skip pointer section if the pointer is invalid. */
3405 if (pointer == 0xFFFF || pointer == 0 ||
3406 pointer >= hw->eeprom.word_size)
3407 continue;
3408
3409 switch (i) {
3410 case IXGBE_PCIE_GENERAL_PTR:
3411 size = IXGBE_IXGBE_PCIE_GENERAL_SIZE;
3412 break;
3413 case IXGBE_PCIE_CONFIG0_PTR:
3414 case IXGBE_PCIE_CONFIG1_PTR:
3415 size = IXGBE_PCIE_CONFIG_SIZE;
3416 break;
3417 default:
3418 size = 0;
3419 break;
3420 }
3421
3422 status = ixgbe_checksum_ptr_x550(hw, pointer, size, &checksum,
3423 buffer, buffer_size);
3424 if (status)
3425 return status;
3426 }
3427
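/* The checksum word is defined so that the checksummed words plus the
 * checksum itself sum to IXGBE_EEPROM_SUM, i.e.
 * checksum = IXGBE_EEPROM_SUM - sum(other words).
 */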
3428 checksum = (u16)IXGBE_EEPROM_SUM - checksum;
3429
3430 return (s32)checksum;
3431 }
3432
3433 /**
3434 * ixgbe_calc_eeprom_checksum_X550 - Calculates and returns the checksum
3435 * @hw: pointer to hardware structure
3436 *
3437 * Returns a negative error code on error, or the 16-bit checksum
3438 **/
3439 s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw)
3440 {
3441 return ixgbe_calc_checksum_X550(hw, NULL, 0);
3442 }
3443
3444 /**
3445 * ixgbe_validate_eeprom_checksum_X550 - Validate EEPROM checksum
3446 * @hw: pointer to hardware structure
3447 * @checksum_val: calculated checksum
3448 *
3449 * Performs checksum calculation and validates the EEPROM checksum. If the
3450 * caller does not need checksum_val, the value can be NULL.
3451 **/
3452 s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, u16 *checksum_val)
3453 {
3454 s32 status;
3455 u16 checksum;
3456 u16 read_checksum = 0;
3457
3458 DEBUGFUNC("ixgbe_validate_eeprom_checksum_X550");
3459
3460 /* Read the first word from the EEPROM. If this times out or fails, do
3461 * not continue or we could be in for a very long wait while every
3462 * EEPROM read fails
3463 */
3464 status = hw->eeprom.ops.read(hw, 0, &checksum);
3465 if (status) {
3466 DEBUGOUT("EEPROM read failed\n");
3467 return status;
3468 }
3469
3470 status = hw->eeprom.ops.calc_checksum(hw);
3471 if (status < 0)
3472 return status;
3473
3474 checksum = (u16)(status & 0xffff);
3475
3476 status = ixgbe_read_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM,
3477 &read_checksum);
3478 if (status)
3479 return status;
3480
3481 /* Verify read checksum from EEPROM is the same as
3482 * calculated checksum
3483 */
3484 if (read_checksum != checksum) {
3485 status = IXGBE_ERR_EEPROM_CHECKSUM;
3486 ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
3487 "Invalid EEPROM checksum");
3488 }
3489
3490 /* If the user cares, return the calculated checksum */
3491 if (checksum_val)
3492 *checksum_val = checksum;
3493
3494 return status;
3495 }
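
/*
 * Illustrative usage (sketch only, not part of the driver): a caller that
 * only needs a pass/fail indication can ignore the checksum value:
 *
 *	if (ixgbe_validate_eeprom_checksum_X550(hw, NULL) ==
 *	    IXGBE_ERR_EEPROM_CHECKSUM)
 *		handle a corrupted NVM image here;
 */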
3496
3497 /**
3498 * ixgbe_update_eeprom_checksum_X550 - Updates the EEPROM checksum and flash
3499 * @hw: pointer to hardware structure
3500 *
3501  * After writing the EEPROM image to shadow RAM using the EEWR register,
3502  * software calculates the checksum, writes it to the EEPROM, and instructs
3503  * the hardware to update the flash.
3504 **/
3505 s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw)
3506 {
3507 s32 status;
3508 u16 checksum = 0;
3509
3510 DEBUGFUNC("ixgbe_update_eeprom_checksum_X550");
3511
3512 /* Read the first word from the EEPROM. If this times out or fails, do
3513 * not continue or we could be in for a very long wait while every
3514 * EEPROM read fails
3515 */
3516 status = ixgbe_read_ee_hostif_X550(hw, 0, &checksum);
3517 if (status) {
3518 DEBUGOUT("EEPROM read failed\n");
3519 return status;
3520 }
3521
3522 status = ixgbe_calc_eeprom_checksum_X550(hw);
3523 if (status < 0)
3524 return status;
3525
3526 checksum = (u16)(status & 0xffff);
3527
3528 status = ixgbe_write_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM,
3529 checksum);
3530 if (status)
3531 return status;
3532
3533 status = ixgbe_update_flash_X550(hw);
3534
3535 return status;
3536 }
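
/*
 * Illustrative sequence (sketch only): after modifying shadow RAM words with
 * ixgbe_write_ee_hostif_X550(), a caller is expected to refresh the checksum
 * and commit the image, e.g.:
 *
 *	ixgbe_write_ee_hostif_X550(hw, offset, data);
 *	ixgbe_update_eeprom_checksum_X550(hw);	(this also triggers the
 *						 flash update below)
 */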
3537
3538 /**
3539 * ixgbe_update_flash_X550 - Instruct HW to copy EEPROM to Flash device
3540 * @hw: pointer to hardware structure
3541 *
3542 * Issue a shadow RAM dump to FW to copy EEPROM from shadow RAM to the flash.
3543 **/
3544 s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
3545 {
3546 s32 status = IXGBE_SUCCESS;
3547 union ixgbe_hic_hdr2 buffer;
3548
3549 DEBUGFUNC("ixgbe_update_flash_X550");
3550
3551 buffer.req.cmd = FW_SHADOW_RAM_DUMP_CMD;
3552 buffer.req.buf_lenh = 0;
3553 buffer.req.buf_lenl = FW_SHADOW_RAM_DUMP_LEN;
3554 buffer.req.checksum = FW_DEFAULT_CHECKSUM;
3555
3556 status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
3557 sizeof(buffer),
3558 IXGBE_HI_COMMAND_TIMEOUT, false);
3559
3560 return status;
3561 }
3562
3563 /**
3564 * ixgbe_get_supported_physical_layer_X550em - Returns physical layer type
3565 * @hw: pointer to hardware structure
3566 *
3567 * Determines physical layer capabilities of the current configuration.
3568 **/
3569 u64 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw)
3570 {
3571 u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
3572 u16 ext_ability = 0;
3573
3574 DEBUGFUNC("ixgbe_get_supported_physical_layer_X550em");
3575
3576 hw->phy.ops.identify(hw);
3577
3578 switch (hw->phy.type) {
3579 case ixgbe_phy_x550em_kr:
3580 if (hw->mac.type == ixgbe_mac_X550EM_a) {
3581 if (hw->phy.nw_mng_if_sel &
3582 IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G) {
3583 physical_layer =
3584 IXGBE_PHYSICAL_LAYER_2500BASE_KX;
3585 break;
3586 } else if (hw->device_id ==
3587 IXGBE_DEV_ID_X550EM_A_KR_L) {
3588 physical_layer =
3589 IXGBE_PHYSICAL_LAYER_1000BASE_KX;
3590 break;
3591 }
3592 }
3593 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR |
3594 IXGBE_PHYSICAL_LAYER_1000BASE_KX;
3595 break;
3596 case ixgbe_phy_x550em_xfi:
3597 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR |
3598 IXGBE_PHYSICAL_LAYER_1000BASE_KX;
3599 break;
3600 case ixgbe_phy_x550em_kx4:
3601 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
3602 IXGBE_PHYSICAL_LAYER_1000BASE_KX;
3603 break;
3604 case ixgbe_phy_x550em_ext_t:
3605 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
3606 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
3607 &ext_ability);
3608 if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
3609 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
3610 if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
3611 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
3612 break;
3613 case ixgbe_phy_fw:
3614 if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_1GB_FULL)
3615 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
3616 if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_100_FULL)
3617 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
3618 if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_10_FULL)
3619 physical_layer |= IXGBE_PHYSICAL_LAYER_10BASE_T;
3620 break;
3621 case ixgbe_phy_sgmii:
3622 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
3623 break;
3624 case ixgbe_phy_ext_1g_t:
3625 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
3626 break;
3627 default:
3628 break;
3629 }
3630
3631 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
3632 physical_layer = ixgbe_get_supported_phy_sfp_layer_generic(hw);
3633
3634 return physical_layer;
3635 }
3636
3637 /**
3638 * ixgbe_get_bus_info_X550em - Set PCI bus info
3639 * @hw: pointer to hardware structure
3640 *
3641 * Sets bus link width and speed to unknown because X550em is
3642 * not a PCI device.
3643 **/
3644 s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw)
3645 {
3646
3647 DEBUGFUNC("ixgbe_get_bus_info_X550em");
3648
3649 hw->bus.width = ixgbe_bus_width_unknown;
3650 hw->bus.speed = ixgbe_bus_speed_unknown;
3651
3652 hw->mac.ops.set_lan_id(hw);
3653
3654 return IXGBE_SUCCESS;
3655 }
3656
3657 /**
3658 * ixgbe_disable_rx_x550 - Disable RX unit
3659 * @hw: pointer to hardware structure
3660 *
3661  * Disables the Rx DMA unit for x550
3662 **/
3663 void ixgbe_disable_rx_x550(struct ixgbe_hw *hw)
3664 {
3665 u32 rxctrl, pfdtxgswc;
3666 s32 status;
3667 struct ixgbe_hic_disable_rxen fw_cmd;
3668
3669 DEBUGFUNC("ixgbe_disable_rx_x550");
3670
3671 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3672 if (rxctrl & IXGBE_RXCTRL_RXEN) {
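/* Save whether VT loopback (LBEN) was enabled before clearing it, so the
 * matching Rx enable path can restore it later.
 */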
3673 pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
3674 if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
3675 pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
3676 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
3677 hw->mac.set_lben = true;
3678 } else {
3679 hw->mac.set_lben = false;
3680 }
3681
3682 fw_cmd.hdr.cmd = FW_DISABLE_RXEN_CMD;
3683 fw_cmd.hdr.buf_len = FW_DISABLE_RXEN_LEN;
3684 fw_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
3685 fw_cmd.port_number = (u8)hw->bus.lan_id;
3686
3687 status = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
3688 sizeof(struct ixgbe_hic_disable_rxen),
3689 IXGBE_HI_COMMAND_TIMEOUT, true);
3690
3691 /* If we fail - disable RX using register write */
3692 if (status) {
3693 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3694 if (rxctrl & IXGBE_RXCTRL_RXEN) {
3695 rxctrl &= ~IXGBE_RXCTRL_RXEN;
3696 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
3697 }
3698 }
3699 }
3700 }
3701
3702 /**
3703 * ixgbe_enter_lplu_t_x550em - Transition to low power states
3704 * @hw: pointer to hardware structure
3705 *
3706 * Configures Low Power Link Up on transition to low power states
3707 * (from D0 to non-D0). Link is required to enter LPLU so avoid resetting the
3708 * X557 PHY immediately prior to entering LPLU.
3709 **/
3710 s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw)
3711 {
3712 u16 an_10g_cntl_reg, autoneg_reg, speed;
3713 s32 status;
3714 ixgbe_link_speed lcd_speed;
3715 u32 save_autoneg;
3716 bool link_up;
3717
3718 /* SW LPLU not required on later HW revisions. */
3719 if ((hw->mac.type == ixgbe_mac_X550EM_x) &&
3720 (IXGBE_FUSES0_REV_MASK &
3721 IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0))))
3722 return IXGBE_SUCCESS;
3723
3724 /* If blocked by MNG FW, then don't restart AN */
3725 if (ixgbe_check_reset_blocked(hw))
3726 return IXGBE_SUCCESS;
3727
3728 status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
3729 if (status != IXGBE_SUCCESS)
3730 return status;
3731
3732 status = ixgbe_read_eeprom(hw, NVM_INIT_CTRL_3, &hw->eeprom.ctrl_word_3);
3733
3734 if (status != IXGBE_SUCCESS)
3735 return status;
3736
3737 /* If link is down, LPLU disabled in NVM, WoL disabled, or manageability
3738 * disabled, then force link down by entering low power mode.
3739 */
3740 if (!link_up || !(hw->eeprom.ctrl_word_3 & NVM_INIT_CTRL_3_LPLU) ||
3741 !(hw->wol_enabled || ixgbe_mng_present(hw)))
3742 return ixgbe_set_copper_phy_power(hw, false);
3743
3744 /* Determine LCD */
3745 status = ixgbe_get_lcd_t_x550em(hw, &lcd_speed);
3746
3747 if (status != IXGBE_SUCCESS)
3748 return status;
3749
3750 /* If no valid LCD link speed, then force link down and exit. */
3751 if (lcd_speed == IXGBE_LINK_SPEED_UNKNOWN)
3752 return ixgbe_set_copper_phy_power(hw, false);
3753
3754 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT,
3755 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3756 &speed);
3757
3758 if (status != IXGBE_SUCCESS)
3759 return status;
3760
3761 /* If no link now, speed is invalid so take link down */
3762 status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
3763 if (status != IXGBE_SUCCESS)
3764 return ixgbe_set_copper_phy_power(hw, false);
3765
3766 /* clear everything but the speed bits */
3767 speed &= IXGBE_MDIO_AUTO_NEG_VEN_STAT_SPEED_MASK;
3768
3769 /* If current speed is already LCD, then exit. */
3770 if (((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB) &&
3771 (lcd_speed == IXGBE_LINK_SPEED_1GB_FULL)) ||
3772 ((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB) &&
3773 (lcd_speed == IXGBE_LINK_SPEED_10GB_FULL)))
3774 return status;
3775
3776 /* Clear AN completed indication */
3777 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM,
3778 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3779 &autoneg_reg);
3780
3781 if (status != IXGBE_SUCCESS)
3782 return status;
3783
3784 status = hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
3785 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3786 &an_10g_cntl_reg);
3787
3788 if (status != IXGBE_SUCCESS)
3789 return status;
3790
3791 status = hw->phy.ops.read_reg(hw,
3792 IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
3793 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3794 &autoneg_reg);
3795
3796 if (status != IXGBE_SUCCESS)
3797 return status;
3798
3799 save_autoneg = hw->phy.autoneg_advertised;
3800
3801 /* Setup link at least common link speed */
3802 status = hw->mac.ops.setup_link(hw, lcd_speed, false);
3803
3804 /* restore autoneg from before setting lplu speed */
3805 hw->phy.autoneg_advertised = save_autoneg;
3806
3807 return status;
3808 }
3809
3810 /**
3811 * ixgbe_get_lcd_t_x550em - Determine lowest common denominator
3812 * @hw: pointer to hardware structure
3813 * @lcd_speed: pointer to lowest common link speed
3814 *
3815 * Determine lowest common link speed with link partner.
3816 **/
3817 s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *lcd_speed)
3818 {
3819 u16 an_lp_status;
3820 s32 status;
3821 u16 word = hw->eeprom.ctrl_word_3;
3822
3823 *lcd_speed = IXGBE_LINK_SPEED_UNKNOWN;
3824
3825 status = hw->phy.ops.read_reg(hw, IXGBE_AUTO_NEG_LP_STATUS,
3826 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3827 &an_lp_status);
3828
3829 if (status != IXGBE_SUCCESS)
3830 return status;
3831
3832 /* If link partner advertised 1G, return 1G */
3833 if (an_lp_status & IXGBE_AUTO_NEG_LP_1000BASE_CAP) {
3834 *lcd_speed = IXGBE_LINK_SPEED_1GB_FULL;
3835 return status;
3836 }
3837
3838 /* If 10G disabled for LPLU via NVM D10GMP, then return no valid LCD */
3839 if ((hw->bus.lan_id && (word & NVM_INIT_CTRL_3_D10GMP_PORT1)) ||
3840 (word & NVM_INIT_CTRL_3_D10GMP_PORT0))
3841 return status;
3842
3843 /* Link partner not capable of lower speeds, return 10G */
3844 *lcd_speed = IXGBE_LINK_SPEED_10GB_FULL;
3845 return status;
3846 }
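
/*
 * Summary of the LCD selection above: a 1G-capable link partner always yields
 * 1G; otherwise 10G is used unless the NVM disables 10G for LPLU on this
 * port, in which case no valid LCD is reported.
 */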
3847
3848 /**
3849 * ixgbe_setup_fc_X550em - Set up flow control
3850 * @hw: pointer to hardware structure
3851 *
3852 * Called at init time to set up flow control.
3853 **/
3854 s32 ixgbe_setup_fc_X550em(struct ixgbe_hw *hw)
3855 {
3856 s32 ret_val = IXGBE_SUCCESS;
3857 u32 pause, asm_dir, reg_val;
3858
3859 DEBUGFUNC("ixgbe_setup_fc_X550em");
3860
3861 /* Validate the requested mode */
3862 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
3863 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
3864 "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
3865 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
3866 goto out;
3867 }
3868
3869 /* 10gig parts do not have a word in the EEPROM to determine the
3870 * default flow control setting, so we explicitly set it to full.
3871 */
3872 if (hw->fc.requested_mode == ixgbe_fc_default)
3873 hw->fc.requested_mode = ixgbe_fc_full;
3874
3875 /* Determine PAUSE and ASM_DIR bits. */
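/* Advertisement encoding used below (PAUSE, ASM_DIR):
 *	none     -> (0, 0)
 *	tx_pause -> (0, 1)
 *	rx_pause -> (1, 1)  advertised the same as full
 *	full     -> (1, 1)
 */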
3876 switch (hw->fc.requested_mode) {
3877 case ixgbe_fc_none:
3878 pause = 0;
3879 asm_dir = 0;
3880 break;
3881 case ixgbe_fc_tx_pause:
3882 pause = 0;
3883 asm_dir = 1;
3884 break;
3885 case ixgbe_fc_rx_pause:
3886 /* Rx Flow control is enabled and Tx Flow control is
3887 * disabled by software override. Since there really
3888 * isn't a way to advertise that we are capable of RX
3889 * Pause ONLY, we will advertise that we support both
3890 * symmetric and asymmetric Rx PAUSE, as such we fall
3891 * through to the fc_full statement. Later, we will
3892 * disable the adapter's ability to send PAUSE frames.
3893 */
3894 case ixgbe_fc_full:
3895 pause = 1;
3896 asm_dir = 1;
3897 break;
3898 default:
3899 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
3900 "Flow control param set incorrectly\n");
3901 ret_val = IXGBE_ERR_CONFIG;
3902 goto out;
3903 }
3904
3905 switch (hw->device_id) {
3906 case IXGBE_DEV_ID_X550EM_X_KR:
3907 case IXGBE_DEV_ID_X550EM_A_KR:
3908 case IXGBE_DEV_ID_X550EM_A_KR_L:
3909 ret_val = hw->mac.ops.read_iosf_sb_reg(hw,
3910 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
3911 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3912 if (ret_val != IXGBE_SUCCESS)
3913 goto out;
3914 reg_val &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
3915 IXGBE_KRM_AN_CNTL_1_ASM_PAUSE);
3916 if (pause)
3917 reg_val |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE;
3918 if (asm_dir)
3919 reg_val |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
3920 ret_val = hw->mac.ops.write_iosf_sb_reg(hw,
3921 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
3922 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3923
3924 /* This device does not fully support AN. */
3925 hw->fc.disable_fc_autoneg = true;
3926 break;
3927 case IXGBE_DEV_ID_X550EM_X_XFI:
3928 hw->fc.disable_fc_autoneg = true;
3929 break;
3930 default:
3931 break;
3932 }
3933
3934 out:
3935 return ret_val;
3936 }
3937
3938 /**
3939 * ixgbe_fc_autoneg_backplane_x550em_a - Enable flow control IEEE clause 37
3940 * @hw: pointer to hardware structure
3941 *
3942 * Enable flow control according to IEEE clause 37.
3943 **/
3944 void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *hw)
3945 {
3946 u32 link_s1, lp_an_page_low, an_cntl_1;
3947 s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
3948 ixgbe_link_speed speed;
3949 bool link_up;
3950
3951 /* AN should have completed when the cable was plugged in.
3952 * Look for reasons to bail out. Bail out if:
3953 * - FC autoneg is disabled, or if
3954 * - link is not up.
3955 */
3956 if (hw->fc.disable_fc_autoneg) {
3957 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
3958 "Flow control autoneg is disabled");
3959 goto out;
3960 }
3961
3962 hw->mac.ops.check_link(hw, &speed, &link_up, false);
3963 if (!link_up) {
3964 ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
3965 goto out;
3966 }
3967
3968 /* Check that auto-negotiation has completed */
3969 status = hw->mac.ops.read_iosf_sb_reg(hw,
3970 IXGBE_KRM_LINK_S1(hw->bus.lan_id),
3971 IXGBE_SB_IOSF_TARGET_KR_PHY, &link_s1);
3972
3973 if (status != IXGBE_SUCCESS ||
3974 (link_s1 & IXGBE_KRM_LINK_S1_MAC_AN_COMPLETE) == 0) {
3975 DEBUGOUT("Auto-Negotiation did not complete\n");
3976 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
3977 goto out;
3978 }
3979
3980 /* Read the 10g AN autoc and LP ability registers and resolve
3981 * local flow control settings accordingly
3982 */
3983 status = hw->mac.ops.read_iosf_sb_reg(hw,
3984 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
3985 IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl_1);
3986
3987 if (status != IXGBE_SUCCESS) {
3988 DEBUGOUT("Auto-Negotiation did not complete\n");
3989 goto out;
3990 }
3991
3992 status = hw->mac.ops.read_iosf_sb_reg(hw,
3993 IXGBE_KRM_LP_BASE_PAGE_HIGH(hw->bus.lan_id),
3994 IXGBE_SB_IOSF_TARGET_KR_PHY, &lp_an_page_low);
3995
3996 if (status != IXGBE_SUCCESS) {
3997 DEBUGOUT("Auto-Negotiation did not complete\n");
3998 goto out;
3999 }
4000
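/* Resolve the local and link-partner PAUSE/ASM_DIR advertisements into
 * hw->fc.current_mode using the common IEEE 802.3 Annex 28B priority
 * resolution helper.
 */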
4001 status = ixgbe_negotiate_fc(hw, an_cntl_1, lp_an_page_low,
4002 IXGBE_KRM_AN_CNTL_1_SYM_PAUSE,
4003 IXGBE_KRM_AN_CNTL_1_ASM_PAUSE,
4004 IXGBE_KRM_LP_BASE_PAGE_HIGH_SYM_PAUSE,
4005 IXGBE_KRM_LP_BASE_PAGE_HIGH_ASM_PAUSE);
4006
4007 out:
4008 if (status == IXGBE_SUCCESS) {
4009 hw->fc.fc_was_autonegged = true;
4010 } else {
4011 hw->fc.fc_was_autonegged = false;
4012 hw->fc.current_mode = hw->fc.requested_mode;
4013 }
4014 }
4015
4016 /**
4017 * ixgbe_fc_autoneg_fiber_x550em_a - passthrough FC settings
4018 * @hw: pointer to hardware structure
4019  *
 * Flow control is not auto-negotiated on this interface; the requested
 * flow control mode is applied directly as the current mode.
4020 **/
4021 void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *hw)
4022 {
4023 hw->fc.fc_was_autonegged = false;
4024 hw->fc.current_mode = hw->fc.requested_mode;
4025 }
4026
4027 /**
4028 * ixgbe_fc_autoneg_sgmii_x550em_a - Enable flow control IEEE clause 37
4029 * @hw: pointer to hardware structure
4030 *
4031 * Enable flow control according to IEEE clause 37.
4032 **/
4033 void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw)
4034 {
4035 s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
4036 u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
4037 ixgbe_link_speed speed;
4038 bool link_up;
4039
4040 /* AN should have completed when the cable was plugged in.
4041 * Look for reasons to bail out. Bail out if:
4042 * - FC autoneg is disabled, or if
4043 * - link is not up.
4044 */
4045 if (hw->fc.disable_fc_autoneg) {
4046 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
4047 "Flow control autoneg is disabled");
4048 goto out;
4049 }
4050
4051 hw->mac.ops.check_link(hw, &speed, &link_up, false);
4052 if (!link_up) {
4053 ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
4054 goto out;
4055 }
4056
4057 /* Check if auto-negotiation has completed */
4058 status = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &info);
4059 if (status != IXGBE_SUCCESS ||
4060 !(info[0] & FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE)) {
4061 DEBUGOUT("Auto-Negotiation did not complete\n");
4062 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
4063 goto out;
4064 }
4065
4066 /* Negotiate the flow control */
4067 status = ixgbe_negotiate_fc(hw, info[0], info[0],
4068 FW_PHY_ACT_GET_LINK_INFO_FC_RX,
4069 FW_PHY_ACT_GET_LINK_INFO_FC_TX,
4070 FW_PHY_ACT_GET_LINK_INFO_LP_FC_RX,
4071 FW_PHY_ACT_GET_LINK_INFO_LP_FC_TX);
4072
4073 out:
4074 if (status == IXGBE_SUCCESS) {
4075 hw->fc.fc_was_autonegged = true;
4076 } else {
4077 hw->fc.fc_was_autonegged = false;
4078 hw->fc.current_mode = hw->fc.requested_mode;
4079 }
4080 }
4081
4082 /**
4083 * ixgbe_setup_fc_backplane_x550em_a - Set up flow control
4084 * @hw: pointer to hardware structure
4085 *
4086 * Called at init time to set up flow control.
4087 **/
4088 s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw)
4089 {
4090 s32 status = IXGBE_SUCCESS;
4091 u32 an_cntl = 0;
4092
4093 DEBUGFUNC("ixgbe_setup_fc_backplane_x550em_a");
4094
4095 /* Validate the requested mode */
4096 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
4097 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
4098 "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
4099 return IXGBE_ERR_INVALID_LINK_SETTINGS;
4100 }
4101
4102 if (hw->fc.requested_mode == ixgbe_fc_default)
4103 hw->fc.requested_mode = ixgbe_fc_full;
4104
4105 /* Set up the 1G and 10G flow control advertisement registers so the
4106 * HW will be able to do FC autoneg once the cable is plugged in. If
4107 * we link at 10G, the 1G advertisement is harmless and vice versa.
4108 */
4109 status = hw->mac.ops.read_iosf_sb_reg(hw,
4110 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
4111 IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl);
4112
4113 if (status != IXGBE_SUCCESS) {
4114 DEBUGOUT("Flow control advertisement register read failed\n");
4115 return status;
4116 }
4117
4118 /* The possible values of fc.requested_mode are:
4119 * 0: Flow control is completely disabled
4120 * 1: Rx flow control is enabled (we can receive pause frames,
4121 * but not send pause frames).
4122 * 2: Tx flow control is enabled (we can send pause frames but
4123 * we do not support receiving pause frames).
4124 * 3: Both Rx and Tx flow control (symmetric) are enabled.
4125 * other: Invalid.
4126 */
4127 switch (hw->fc.requested_mode) {
4128 case ixgbe_fc_none:
4129 /* Flow control completely disabled by software override. */
4130 an_cntl &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
4131 IXGBE_KRM_AN_CNTL_1_ASM_PAUSE);
4132 break;
4133 case ixgbe_fc_tx_pause:
4134 /* Tx Flow control is enabled, and Rx Flow control is
4135 * disabled by software override.
4136 */
4137 an_cntl |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
4138 an_cntl &= ~IXGBE_KRM_AN_CNTL_1_SYM_PAUSE;
4139 break;
4140 case ixgbe_fc_rx_pause:
4141 /* Rx Flow control is enabled and Tx Flow control is
4142 * disabled by software override. Since there really
4143 * isn't a way to advertise that we are capable of RX
4144 * Pause ONLY, we will advertise that we support both
4145 * symmetric and asymmetric Rx PAUSE, as such we fall
4146 * through to the fc_full statement. Later, we will
4147 * disable the adapter's ability to send PAUSE frames.
4148 */
4149 case ixgbe_fc_full:
4150 /* Flow control (both Rx and Tx) is enabled by SW override. */
4151 an_cntl |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
4152 IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
4153 break;
4154 default:
4155 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
4156 "Flow control param set incorrectly\n");
4157 return IXGBE_ERR_CONFIG;
4158 }
4159
4160 status = hw->mac.ops.write_iosf_sb_reg(hw,
4161 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
4162 IXGBE_SB_IOSF_TARGET_KR_PHY, an_cntl);
4163
4164 /* Restart auto-negotiation. */
4165 status = ixgbe_restart_an_internal_phy_x550em(hw);
4166
4167 return status;
4168 }
4169
4170 /**
4171 * ixgbe_set_mux - Set mux for port 1 access with CS4227
4172 * @hw: pointer to hardware structure
4173 * @state: set mux if 1, clear if 0
4174 */
4175 static void ixgbe_set_mux(struct ixgbe_hw *hw, u8 state)
4176 {
4177 u32 esdp;
4178
4179 if (!hw->bus.lan_id)
4180 return;
4181 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
4182 if (state)
4183 esdp |= IXGBE_ESDP_SDP1;
4184 else
4185 esdp &= ~IXGBE_ESDP_SDP1;
4186 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
4187 IXGBE_WRITE_FLUSH(hw);
4188 }
4189
4190 /**
4191 * ixgbe_acquire_swfw_sync_X550em - Acquire SWFW semaphore
4192 * @hw: pointer to hardware structure
4193 * @mask: Mask to specify which semaphore to acquire
4194 *
4195 * Acquires the SWFW semaphore and sets the I2C MUX
4196 **/
4197 s32 ixgbe_acquire_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
4198 {
4199 s32 status;
4200
4201 DEBUGFUNC("ixgbe_acquire_swfw_sync_X550em");
4202
4203 status = ixgbe_acquire_swfw_sync_X540(hw, mask);
4204 if (status)
4205 return status;
4206
4207 if (mask & IXGBE_GSSR_I2C_MASK)
4208 ixgbe_set_mux(hw, 1);
4209
4210 return IXGBE_SUCCESS;
4211 }
4212
4213 /**
4214 * ixgbe_release_swfw_sync_X550em - Release SWFW semaphore
4215 * @hw: pointer to hardware structure
4216 * @mask: Mask to specify which semaphore to release
4217 *
4218 * Releases the SWFW semaphore and sets the I2C MUX
4219 **/
4220 void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
4221 {
4222 DEBUGFUNC("ixgbe_release_swfw_sync_X550em");
4223
4224 if (mask & IXGBE_GSSR_I2C_MASK)
4225 ixgbe_set_mux(hw, 0);
4226
4227 ixgbe_release_swfw_sync_X540(hw, mask);
4228 }
4229
4230 /**
4231 * ixgbe_acquire_swfw_sync_X550a - Acquire SWFW semaphore
4232 * @hw: pointer to hardware structure
4233 * @mask: Mask to specify which semaphore to acquire
4234 *
4235  * Acquires the SWFW semaphore and gets the shared PHY token as needed
4236 */
4237 static s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask)
4238 {
4239 u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM;
4240 int retries = FW_PHY_TOKEN_RETRIES;
4241 s32 status = IXGBE_SUCCESS;
4242
4243 DEBUGFUNC("ixgbe_acquire_swfw_sync_X550a");
4244
4245 status = IXGBE_SUCCESS;
4246 if (hmask)
4247 status = ixgbe_acquire_swfw_sync_X540(hw, hmask);
4248
4249 if (status) {
4250 DEBUGOUT1("Could not acquire SWFW semaphore, Status = %d\n", status);
4251 return status;
4252 }
4253
4254 if (!(mask & IXGBE_GSSR_TOKEN_SM))
4255 return IXGBE_SUCCESS;
4256
4257 while (--retries) {
4258 status = ixgbe_get_phy_token(hw);
4259
4260 if (status == IXGBE_SUCCESS)
4261 return IXGBE_SUCCESS;
4262
4263 if (status != IXGBE_ERR_TOKEN_RETRY) {
4264 DEBUGOUT1("Acquiring the PHY token failed with non-retryable status = %d\n", status);
4265 if (hmask)
4266 ixgbe_release_swfw_sync_X540(hw, hmask);
4267 return status;
4268 }
4269
4270 if (status == IXGBE_ERR_TOKEN_RETRY)
4271 DEBUGOUT1("Could not acquire PHY token, Status = %d\n",
4272 status);
4273 }
4274
4275 if (hmask)
4276 ixgbe_release_swfw_sync_X540(hw, hmask);
4277
4278 DEBUGOUT1("Semaphore acquisition retries failed!: PHY ID = 0x%08X\n",
4279 hw->phy.id);
4280 return status;
4281 }
4282
4283 /**
4284 * ixgbe_release_swfw_sync_X550a - Release SWFW semaphore
4285 * @hw: pointer to hardware structure
4286 * @mask: Mask to specify which semaphore to release
4287 *
4288 * Releases the SWFW semaphore and puts the shared phy token as needed
4289 */
4290 static void ixgbe_release_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask)
4291 {
4292 u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM;
4293
4294 DEBUGFUNC("ixgbe_release_swfw_sync_X550a");
4295
4296 if (mask & IXGBE_GSSR_TOKEN_SM)
4297 ixgbe_put_phy_token(hw);
4298
4299 if (hmask)
4300 ixgbe_release_swfw_sync_X540(hw, hmask);
4301 }
4302
4303 /**
4304 * ixgbe_read_phy_reg_x550a - Reads specified PHY register
4305 * @hw: pointer to hardware structure
4306 * @reg_addr: 32 bit address of PHY register to read
4307 * @device_type: 5 bit device type
4308 * @phy_data: Pointer to read data from PHY register
4309 *
4310 * Reads a value from a specified PHY register using the SWFW lock and PHY
4311 * Token. The PHY Token is needed since the MDIO is shared between to MAC
4312  * Token. The PHY Token is needed since the MDIO is shared between two MAC
4313 **/
4314 s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
4315 u32 device_type, u16 *phy_data)
4316 {
4317 s32 status;
4318 u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
4319
4320 DEBUGFUNC("ixgbe_read_phy_reg_x550a");
4321
4322 if (hw->mac.ops.acquire_swfw_sync(hw, mask))
4323 return IXGBE_ERR_SWFW_SYNC;
4324
4325 status = hw->phy.ops.read_reg_mdi(hw, reg_addr, device_type, phy_data);
4326
4327 hw->mac.ops.release_swfw_sync(hw, mask);
4328
4329 return status;
4330 }
4331
4332 /**
4333 * ixgbe_write_phy_reg_x550a - Writes specified PHY register
4334 * @hw: pointer to hardware structure
4335 * @reg_addr: 32 bit PHY register to write
4336 * @device_type: 5 bit device type
4337 * @phy_data: Data to write to the PHY register
4338 *
4339 * Writes a value to specified PHY register using the SWFW lock and PHY Token.
4340  * The PHY Token is needed since the MDIO is shared between two MAC instances.
4341 **/
4342 s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
4343 u32 device_type, u16 phy_data)
4344 {
4345 s32 status;
4346 u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
4347
4348 DEBUGFUNC("ixgbe_write_phy_reg_x550a");
4349
4350 if (hw->mac.ops.acquire_swfw_sync(hw, mask) == IXGBE_SUCCESS) {
4351 status = hw->phy.ops.write_reg_mdi(hw, reg_addr, device_type,
4352 phy_data);
4353 hw->mac.ops.release_swfw_sync(hw, mask);
4354 } else {
4355 status = IXGBE_ERR_SWFW_SYNC;
4356 }
4357
4358 return status;
4359 }
4360
4361 /**
4362 * ixgbe_handle_lasi_ext_t_x550em - Handle external Base T PHY interrupt
4363 * @hw: pointer to hardware structure
4364 *
4365 * Handle external Base T PHY interrupt. If high temperature
4366 * failure alarm then return error, else if link status change
4367 * then setup internal/external PHY link
4368 *
4369 * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature
4370 * failure alarm, else return PHY access status.
4371 */
4372 s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw)
4373 {
4374 bool lsc;
4375 u32 status;
4376
4377 status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
4378
4379 if (status != IXGBE_SUCCESS)
4380 return status;
4381
4382 if (lsc)
4383 return ixgbe_setup_internal_phy(hw);
4384
4385 return IXGBE_SUCCESS;
4386 }
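
/*
 * Note: this handler is intended to run from the driver's interrupt service
 * path when the external X557 PHY raises its LASI interrupt; callers outside
 * this file are assumed, not shown here.
 */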
4387
4388 /**
4389 * ixgbe_setup_mac_link_t_X550em - Sets the auto advertised link speed
4390 * @hw: pointer to hardware structure
4391 * @speed: new link speed
4392 * @autoneg_wait_to_complete: true when waiting for completion is needed
4393 *
4394 * Setup internal/external PHY link speed based on link speed, then set
4395 * external PHY auto advertised link speed.
4396 *
4397 * Returns error status for any failure
4398 **/
4399 s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw,
4400 ixgbe_link_speed speed,
4401 bool autoneg_wait_to_complete)
4402 {
4403 s32 status;
4404 ixgbe_link_speed force_speed;
4405 u32 i;
4406 bool link_up = false;
4407
4408 DEBUGFUNC("ixgbe_setup_mac_link_t_X550em");
4409
4410 /* Set up the internal/external PHY link speed for iXFI (10G); if only
4411  * 1G is auto advertised, set up a KX link instead.
4412 */
4413 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
4414 force_speed = IXGBE_LINK_SPEED_10GB_FULL;
4415 else
4416 force_speed = IXGBE_LINK_SPEED_1GB_FULL;
4417
4418 /* If X552 and internal link mode is XFI, then setup XFI internal link.
4419 */
4420 if (hw->mac.type == ixgbe_mac_X550EM_x &&
4421 !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
4422 status = ixgbe_setup_ixfi_x550em(hw, &force_speed);
4423
4424 if (status != IXGBE_SUCCESS)
4425 return status;
4426
4427 /* Wait for the controller to acquire link */
4428 for (i = 0; i < 10; i++) {
4429 msec_delay(100);
4430
4431 status = ixgbe_check_link(hw, &force_speed, &link_up,
4432 false);
4433 if (status != IXGBE_SUCCESS)
4434 return status;
4435
4436 if (link_up)
4437 break;
4438 }
4439 }
4440
4441 return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait_to_complete);
4442 }
4443
4444 /**
4445 * ixgbe_check_link_t_X550em - Determine link and speed status
4446 * @hw: pointer to hardware structure
4447 * @speed: pointer to link speed
4448 * @link_up: true when link is up
4449 * @link_up_wait_to_complete: bool used to wait for link up or not
4450 *
4451 * Check that both the MAC and X557 external PHY have link.
4452 **/
4453 s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
4454 bool *link_up, bool link_up_wait_to_complete)
4455 {
4456 u32 status;
4457 u16 i, autoneg_status = 0;
4458
4459 if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
4460 return IXGBE_ERR_CONFIG;
4461
4462 status = ixgbe_check_mac_link_generic(hw, speed, link_up,
4463 link_up_wait_to_complete);
4464
4465 /* If check link fails or MAC link is not up, then return */
4466 if (status != IXGBE_SUCCESS || !(*link_up))
4467 return status;
4468
4469 /* MAC link is up, so check the external X557 PHY link. The PHY link
4470  * status bit latches low, so it can only be used to detect a link
4471  * drop, not the current link state, unless back-to-back reads are
4472  * performed.
4473  */
4474 for (i = 0; i < 2; i++) {
4475 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
4476 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
4477 &autoneg_status);
4478
4479 if (status != IXGBE_SUCCESS)
4480 return status;
4481 }
4482
4483 /* If external PHY link is not up, then indicate link not up */
4484 if (!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS))
4485 *link_up = false;
4486
4487 return IXGBE_SUCCESS;
4488 }
4489
4490 /**
4491 * ixgbe_reset_phy_t_X550em - Performs X557 PHY reset and enables LASI
4492 * @hw: pointer to hardware structure
4493 **/
4494 s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw)
4495 {
4496 s32 status;
4497
4498 status = ixgbe_reset_phy_generic(hw);
4499
4500 if (status != IXGBE_SUCCESS)
4501 return status;
4502
4503 /* Configure Link Status Alarm and Temperature Threshold interrupts */
4504 return ixgbe_enable_lasi_ext_t_x550em(hw);
4505 }
4506
4507 /**
4508 * ixgbe_led_on_t_X550em - Turns on the software controllable LEDs.
4509 * @hw: pointer to hardware structure
4510 * @led_idx: led number to turn on
4511 **/
4512 s32 ixgbe_led_on_t_X550em(struct ixgbe_hw *hw, u32 led_idx)
4513 {
4514 u16 phy_data;
4515
4516 DEBUGFUNC("ixgbe_led_on_t_X550em");
4517
4518 if (led_idx >= IXGBE_X557_MAX_LED_INDEX)
4519 return IXGBE_ERR_PARAM;
4520
4521 /* To turn on the LED, set mode to ON. */
4522 ixgbe_read_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
4523 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data);
4524 phy_data |= IXGBE_X557_LED_MANUAL_SET_MASK;
4525 ixgbe_write_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
4526 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data);
4527
4528 /* Some designs have the LEDs wired to the MAC */
4529 return ixgbe_led_on_generic(hw, led_idx);
4530 }
4531
4532 /**
4533 * ixgbe_led_off_t_X550em - Turns off the software controllable LEDs.
4534 * @hw: pointer to hardware structure
4535 * @led_idx: led number to turn off
4536 **/
4537 s32 ixgbe_led_off_t_X550em(struct ixgbe_hw *hw, u32 led_idx)
4538 {
4539 u16 phy_data;
4540
4541 DEBUGFUNC("ixgbe_led_off_t_X550em");
4542
4543 if (led_idx >= IXGBE_X557_MAX_LED_INDEX)
4544 return IXGBE_ERR_PARAM;
4545
4546 /* To turn off the LED, set mode to OFF. */
4547 ixgbe_read_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
4548 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data);
4549 phy_data &= ~IXGBE_X557_LED_MANUAL_SET_MASK;
4550 ixgbe_write_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
4551 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data);
4552
4553 /* Some designs have the LEDs wired to the MAC */
4554 return ixgbe_led_off_generic(hw, led_idx);
4555 }
4556
4557 /**
4558 * ixgbe_set_fw_drv_ver_x550 - Sends driver version to firmware
4559 * @hw: pointer to the HW structure
4560 * @maj: driver version major number
4561 * @min: driver version minor number
4562 * @build: driver version build number
4563 * @sub: driver version sub build number
4564 * @len: length of driver_ver string
4565 * @driver_ver: driver string
4566 *
4567 * Sends driver version number to firmware through the manageability
4568 * block. On success return IXGBE_SUCCESS
4569 * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring
4570 * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
4571 **/
4572 s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
4573 u8 build, u8 sub, u16 len, const char *driver_ver)
4574 {
4575 struct ixgbe_hic_drv_info2 fw_cmd;
4576 s32 ret_val = IXGBE_SUCCESS;
4577 int i;
4578
4579 DEBUGFUNC("ixgbe_set_fw_drv_ver_x550");
4580
4581 if ((len == 0) || (driver_ver == NULL) ||
4582 (len > sizeof(fw_cmd.driver_string)))
4583 return IXGBE_ERR_INVALID_ARGUMENT;
4584
4585 fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
4586 fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN + len;
4587 fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
4588 fw_cmd.port_num = (u8)hw->bus.func;
4589 fw_cmd.ver_maj = maj;
4590 fw_cmd.ver_min = min;
4591 fw_cmd.ver_build = build;
4592 fw_cmd.ver_sub = sub;
4593 fw_cmd.hdr.checksum = 0;
4594 memcpy(fw_cmd.driver_string, driver_ver, len);
4595 fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
4596 (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
4597
4598 for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
4599 ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
4600 sizeof(fw_cmd),
4601 IXGBE_HI_COMMAND_TIMEOUT,
4602 true);
4603 if (ret_val != IXGBE_SUCCESS)
4604 continue;
4605
4606 if (fw_cmd.hdr.cmd_or_resp.ret_status ==
4607 FW_CEM_RESP_STATUS_SUCCESS)
4608 ret_val = IXGBE_SUCCESS;
4609 else
4610 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4611
4612 break;
4613 }
4614
4615 return ret_val;
4616 }
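
/*
 * Illustrative call (sketch only; the version numbers and string below are
 * placeholders, not real driver values):
 *
 *	ixgbe_set_fw_drv_ver_x550(hw, 1, 0, 0, 0,
 *				  sizeof("ixgbe-example"), "ixgbe-example");
 */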
4617
4618 /**
4619 * ixgbe_fw_recovery_mode_X550 - Check FW NVM recovery mode
4620  * @hw: pointer to hardware structure
4621 *
4622 * Returns true if in FW NVM recovery mode.
4623 **/
4624 bool ixgbe_fw_recovery_mode_X550(struct ixgbe_hw *hw)
4625 {
4626 u32 fwsm;
4627
4628 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw));
4629
4630 return !!(fwsm & IXGBE_FWSM_FW_NVM_RECOVERY_MODE);
4631 }
4632