1 /******************************************************************************
2
3 Copyright (c) 2001-2017, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*$FreeBSD$*/
34
35 #include "ixgbe_x550.h"
36 #include "ixgbe_x540.h"
37 #include "ixgbe_type.h"
38 #include "ixgbe_api.h"
39 #include "ixgbe_common.h"
40 #include "ixgbe_phy.h"
41
42 static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed);
43 static s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *, u32 mask);
44 static void ixgbe_release_swfw_sync_X550a(struct ixgbe_hw *, u32 mask);
45 static s32 ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw);
46
47 /**
48 * ixgbe_init_ops_X550 - Inits func ptrs and MAC type
49 * @hw: pointer to hardware structure
50 *
51 * Initialize the function pointers and assign the MAC type for X550.
52 * Does not touch the hardware.
53 **/
54 s32 ixgbe_init_ops_X550(struct ixgbe_hw *hw)
55 {
56 struct ixgbe_mac_info *mac = &hw->mac;
57 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
58 s32 ret_val;
59
60 DEBUGFUNC("ixgbe_init_ops_X550");
61
62 ret_val = ixgbe_init_ops_X540(hw);
63 mac->ops.dmac_config = ixgbe_dmac_config_X550;
64 mac->ops.dmac_config_tcs = ixgbe_dmac_config_tcs_X550;
65 mac->ops.dmac_update_tcs = ixgbe_dmac_update_tcs_X550;
66 mac->ops.setup_eee = NULL;
67 mac->ops.set_source_address_pruning =
68 ixgbe_set_source_address_pruning_X550;
69 mac->ops.set_ethertype_anti_spoofing =
70 ixgbe_set_ethertype_anti_spoofing_X550;
71
72 mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic;
73 eeprom->ops.init_params = ixgbe_init_eeprom_params_X550;
74 eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X550;
75 eeprom->ops.read = ixgbe_read_ee_hostif_X550;
76 eeprom->ops.read_buffer = ixgbe_read_ee_hostif_buffer_X550;
77 eeprom->ops.write = ixgbe_write_ee_hostif_X550;
78 eeprom->ops.write_buffer = ixgbe_write_ee_hostif_buffer_X550;
79 eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X550;
80 eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X550;
81
82 mac->ops.disable_mdd = ixgbe_disable_mdd_X550;
83 mac->ops.enable_mdd = ixgbe_enable_mdd_X550;
84 mac->ops.mdd_event = ixgbe_mdd_event_X550;
85 mac->ops.restore_mdd_vf = ixgbe_restore_mdd_vf_X550;
86 mac->ops.disable_rx = ixgbe_disable_rx_x550;
87 /* Manageability interface */
88 mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_x550;
89 switch (hw->device_id) {
90 case IXGBE_DEV_ID_X550EM_X_1G_T:
91 hw->mac.ops.led_on = NULL;
92 hw->mac.ops.led_off = NULL;
93 break;
94 case IXGBE_DEV_ID_X550EM_X_10G_T:
95 case IXGBE_DEV_ID_X550EM_A_10G_T:
96 hw->mac.ops.led_on = ixgbe_led_on_t_X550em;
97 hw->mac.ops.led_off = ixgbe_led_off_t_X550em;
98 break;
99 default:
100 break;
101 }
102 return ret_val;
103 }
104
105 /**
106 * ixgbe_read_cs4227 - Read CS4227 register
107 * @hw: pointer to hardware structure
108 * @reg: register number to read
109 * @value: pointer to receive value read
110 *
111 * Returns status code
112 **/
113 static s32 ixgbe_read_cs4227(struct ixgbe_hw *hw, u16 reg, u16 *value)
114 {
115 return hw->link.ops.read_link_unlocked(hw, hw->link.addr, reg, value);
116 }
117
118 /**
119 * ixgbe_write_cs4227 - Write CS4227 register
120 * @hw: pointer to hardware structure
121 * @reg: register number to write
122 * @value: value to write to register
123 *
124 * Returns status code
125 **/
126 static s32 ixgbe_write_cs4227(struct ixgbe_hw *hw, u16 reg, u16 value)
127 {
128 return hw->link.ops.write_link_unlocked(hw, hw->link.addr, reg, value);
129 }
130
131 /**
132 * ixgbe_read_pe - Read register from port expander
133 * @hw: pointer to hardware structure
134 * @reg: register number to read
135 * @value: pointer to receive read value
136 *
137 * Returns status code
138 **/
139 static s32 ixgbe_read_pe(struct ixgbe_hw *hw, u8 reg, u8 *value)
140 {
141 s32 status;
142
143 status = ixgbe_read_i2c_byte_unlocked(hw, reg, IXGBE_PE, value);
144 if (status != IXGBE_SUCCESS)
145 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
146 "port expander access failed with %d\n", status);
147 return status;
148 }
149
150 /**
151 * ixgbe_write_pe - Write register to port expander
152 * @hw: pointer to hardware structure
153 * @reg: register number to write
154 * @value: value to write
155 *
156 * Returns status code
157 **/
158 static s32 ixgbe_write_pe(struct ixgbe_hw *hw, u8 reg, u8 value)
159 {
160 s32 status;
161
162 status = ixgbe_write_i2c_byte_unlocked(hw, reg, IXGBE_PE, value);
163 if (status != IXGBE_SUCCESS)
164 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
165 "port expander access failed with %d\n", status);
166 return status;
167 }
168
169 /**
170 * ixgbe_reset_cs4227 - Reset CS4227 using port expander
171 * @hw: pointer to hardware structure
172 *
173 * This function assumes that the caller has acquired the proper semaphore.
174 * Returns error code
175 **/
176 static s32 ixgbe_reset_cs4227(struct ixgbe_hw *hw)
177 {
178 s32 status;
179 u32 retry;
180 u16 value;
181 u8 reg;
182
183 /* Trigger hard reset. */
184 status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
185 if (status != IXGBE_SUCCESS)
186 return status;
187 reg |= IXGBE_PE_BIT1;
188 status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
189 if (status != IXGBE_SUCCESS)
190 return status;
191
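	/* Clear bit 1 in the port expander config register (assumed to switch
	 * the reset line to output mode).
	 */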
192 status = ixgbe_read_pe(hw, IXGBE_PE_CONFIG, &reg);
193 if (status != IXGBE_SUCCESS)
194 return status;
195 reg &= ~IXGBE_PE_BIT1;
196 status = ixgbe_write_pe(hw, IXGBE_PE_CONFIG, reg);
197 if (status != IXGBE_SUCCESS)
198 return status;
199
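	/* Drive the reset line low, hold it, then release it. */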
200 status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
201 if (status != IXGBE_SUCCESS)
202 return status;
203 reg &= ~IXGBE_PE_BIT1;
204 status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
205 if (status != IXGBE_SUCCESS)
206 return status;
207
208 usec_delay(IXGBE_CS4227_RESET_HOLD);
209
210 status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
211 if (status != IXGBE_SUCCESS)
212 return status;
213 reg |= IXGBE_PE_BIT1;
214 status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
215 if (status != IXGBE_SUCCESS)
216 return status;
217
218 /* Wait for the reset to complete. */
219 msec_delay(IXGBE_CS4227_RESET_DELAY);
220 for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) {
221 status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EFUSE_STATUS,
222 &value);
223 if (status == IXGBE_SUCCESS &&
224 value == IXGBE_CS4227_EEPROM_LOAD_OK)
225 break;
226 msec_delay(IXGBE_CS4227_CHECK_DELAY);
227 }
228 if (retry == IXGBE_CS4227_RETRIES) {
229 ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
230 "CS4227 reset did not complete.");
231 return IXGBE_ERR_PHY;
232 }
233
234 status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EEPROM_STATUS, &value);
235 if (status != IXGBE_SUCCESS ||
236 !(value & IXGBE_CS4227_EEPROM_LOAD_OK)) {
237 ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
238 "CS4227 EEPROM did not load successfully.");
239 return IXGBE_ERR_PHY;
240 }
241
242 return IXGBE_SUCCESS;
243 }
244
245 /**
246 * ixgbe_check_cs4227 - Check CS4227 and reset as needed
247 * @hw: pointer to hardware structure
248 **/
249 static void ixgbe_check_cs4227(struct ixgbe_hw *hw)
250 {
251 s32 status = IXGBE_SUCCESS;
252 u32 swfw_mask = hw->phy.phy_semaphore_mask;
253 u16 value = 0;
254 u8 retry;
255
256 for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) {
257 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
258 if (status != IXGBE_SUCCESS) {
259 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
260 "semaphore failed with %d", status);
261 msec_delay(IXGBE_CS4227_CHECK_DELAY);
262 continue;
263 }
264
265 /* Get status of reset flow. */
266 status = ixgbe_read_cs4227(hw, IXGBE_CS4227_SCRATCH, &value);
267
268 if (status == IXGBE_SUCCESS &&
269 value == IXGBE_CS4227_RESET_COMPLETE)
270 goto out;
271
272 if (status != IXGBE_SUCCESS ||
273 value != IXGBE_CS4227_RESET_PENDING)
274 break;
275
276 /* Reset is pending. Wait and check again. */
277 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
278 msec_delay(IXGBE_CS4227_CHECK_DELAY);
279 }
280
281 /* If still pending, assume other instance failed. */
282 if (retry == IXGBE_CS4227_RETRIES) {
283 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
284 if (status != IXGBE_SUCCESS) {
285 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
286 "semaphore failed with %d", status);
287 return;
288 }
289 }
290
291 /* Reset the CS4227. */
292 status = ixgbe_reset_cs4227(hw);
293 if (status != IXGBE_SUCCESS) {
294 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
295 "CS4227 reset failed: %d", status);
296 goto out;
297 }
298
299 /* Reset takes so long, temporarily release semaphore in case the
300 * other driver instance is waiting for the reset indication.
301 */
302 ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH,
303 IXGBE_CS4227_RESET_PENDING);
304 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
305 msec_delay(10);
306 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
307 if (status != IXGBE_SUCCESS) {
308 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
309 "semaphore failed with %d", status);
310 return;
311 }
312
313 /* Record completion for next time. */
314 status = ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH,
315 IXGBE_CS4227_RESET_COMPLETE);
316
317 out:
318 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
319 msec_delay(hw->eeprom.semaphore_delay);
320 }
321
322 /**
323 * ixgbe_setup_mux_ctl - Setup ESDP register for I2C mux control
324 * @hw: pointer to hardware structure
325 **/
326 static void ixgbe_setup_mux_ctl(struct ixgbe_hw *hw)
327 {
328 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
329
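	/* For I2C mux control: on the second port SDP1 is driven low as a GPIO
	 * output, and SDP0 is switched to a GPIO input on both ports.
	 */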
330 if (hw->bus.lan_id) {
331 esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1);
332 esdp |= IXGBE_ESDP_SDP1_DIR;
333 }
334 esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR);
335 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
336 IXGBE_WRITE_FLUSH(hw);
337 }
338
339 /**
340 * ixgbe_identify_phy_x550em - Get PHY type based on device id
341 * @hw: pointer to hardware structure
342 *
343 * Returns error code
344 */
345 static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
346 {
347 hw->mac.ops.set_lan_id(hw);
348
349 ixgbe_read_mng_if_sel_x550em(hw);
350
351 switch (hw->device_id) {
352 case IXGBE_DEV_ID_X550EM_A_SFP:
353 return ixgbe_identify_module_generic(hw);
354 case IXGBE_DEV_ID_X550EM_X_SFP:
355 /* set up for CS4227 usage */
356 ixgbe_setup_mux_ctl(hw);
357 ixgbe_check_cs4227(hw);
358 /* Fallthrough */
359
360 case IXGBE_DEV_ID_X550EM_A_SFP_N:
361 return ixgbe_identify_module_generic(hw);
362 break;
363 case IXGBE_DEV_ID_X550EM_X_KX4:
364 hw->phy.type = ixgbe_phy_x550em_kx4;
365 break;
366 case IXGBE_DEV_ID_X550EM_X_XFI:
367 hw->phy.type = ixgbe_phy_x550em_xfi;
368 break;
369 case IXGBE_DEV_ID_X550EM_X_KR:
370 case IXGBE_DEV_ID_X550EM_A_KR:
371 case IXGBE_DEV_ID_X550EM_A_KR_L:
372 hw->phy.type = ixgbe_phy_x550em_kr;
373 break;
374 case IXGBE_DEV_ID_X550EM_A_10G_T:
375 case IXGBE_DEV_ID_X550EM_X_10G_T:
376 return ixgbe_identify_phy_generic(hw);
377 case IXGBE_DEV_ID_X550EM_X_1G_T:
378 hw->phy.type = ixgbe_phy_ext_1g_t;
379 break;
380 case IXGBE_DEV_ID_X550EM_A_1G_T:
381 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
382 hw->phy.type = ixgbe_phy_fw;
383 if (hw->bus.lan_id)
384 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
385 else
386 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM;
387 break;
388 default:
389 break;
390 }
391 return IXGBE_SUCCESS;
392 }
393
394 /**
395 * ixgbe_fw_phy_activity - Perform an activity on a PHY
396 * @hw: pointer to hardware structure
397 * @activity: activity to perform
398 * @data: Pointer to 4 32-bit words of data
399 */
400 s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity,
401 u32 (*data)[FW_PHY_ACT_DATA_COUNT])
402 {
403 union {
404 struct ixgbe_hic_phy_activity_req cmd;
405 struct ixgbe_hic_phy_activity_resp rsp;
406 } hic;
407 u16 retries = FW_PHY_ACT_RETRIES;
408 s32 rc;
409 u16 i;
410
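	/* The caller's data words are sent to the firmware in big-endian order
	 * and, on success, overwritten with the response converted back to CPU
	 * order.
	 */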
411 do {
412 memset(&hic, 0, sizeof(hic));
413 hic.cmd.hdr.cmd = FW_PHY_ACT_REQ_CMD;
414 hic.cmd.hdr.buf_len = FW_PHY_ACT_REQ_LEN;
415 hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
416 hic.cmd.port_number = hw->bus.lan_id;
417 hic.cmd.activity_id = IXGBE_CPU_TO_LE16(activity);
418 for (i = 0; i < FW_PHY_ACT_DATA_COUNT; ++i)
419 hic.cmd.data[i] = IXGBE_CPU_TO_BE32((*data)[i]);
420
421 rc = ixgbe_host_interface_command(hw, (u32 *)&hic.cmd,
422 sizeof(hic.cmd),
423 IXGBE_HI_COMMAND_TIMEOUT,
424 TRUE);
425 if (rc != IXGBE_SUCCESS)
426 return rc;
427 if (hic.rsp.hdr.cmd_or_resp.ret_status ==
428 FW_CEM_RESP_STATUS_SUCCESS) {
429 for (i = 0; i < FW_PHY_ACT_DATA_COUNT; ++i)
430 (*data)[i] = IXGBE_BE32_TO_CPU(hic.rsp.data[i]);
431 return IXGBE_SUCCESS;
432 }
433 usec_delay(20);
434 --retries;
435 } while (retries > 0);
436
437 return IXGBE_ERR_HOST_INTERFACE_COMMAND;
438 }
439
440 static const struct {
441 u16 fw_speed;
442 ixgbe_link_speed phy_speed;
443 } ixgbe_fw_map[] = {
444 { FW_PHY_ACT_LINK_SPEED_10, IXGBE_LINK_SPEED_10_FULL },
445 { FW_PHY_ACT_LINK_SPEED_100, IXGBE_LINK_SPEED_100_FULL },
446 { FW_PHY_ACT_LINK_SPEED_1G, IXGBE_LINK_SPEED_1GB_FULL },
447 { FW_PHY_ACT_LINK_SPEED_2_5G, IXGBE_LINK_SPEED_2_5GB_FULL },
448 { FW_PHY_ACT_LINK_SPEED_5G, IXGBE_LINK_SPEED_5GB_FULL },
449 { FW_PHY_ACT_LINK_SPEED_10G, IXGBE_LINK_SPEED_10GB_FULL },
450 };
451
452 /**
453 * ixgbe_get_phy_id_fw - Get the phy ID via firmware command
454 * @hw: pointer to hardware structure
455 *
456 * Returns error code
457 */
458 static s32 ixgbe_get_phy_id_fw(struct ixgbe_hw *hw)
459 {
460 u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
461 u16 phy_speeds;
462 u16 phy_id_lo;
463 s32 rc;
464 u16 i;
465
466 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_PHY_INFO, &info);
467 if (rc)
468 return rc;
469
470 hw->phy.speeds_supported = 0;
471 phy_speeds = info[0] & FW_PHY_INFO_SPEED_MASK;
472 for (i = 0; i < sizeof(ixgbe_fw_map) / sizeof(ixgbe_fw_map[0]); ++i) {
473 if (phy_speeds & ixgbe_fw_map[i].fw_speed)
474 hw->phy.speeds_supported |= ixgbe_fw_map[i].phy_speed;
475 }
476 if (!hw->phy.autoneg_advertised)
477 hw->phy.autoneg_advertised = hw->phy.speeds_supported;
478
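	/* Assemble the PHY ID: the high bits come from info[0], while info[1]
	 * carries the low ID bits and the revision.
	 */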
479 hw->phy.id = info[0] & FW_PHY_INFO_ID_HI_MASK;
480 phy_id_lo = info[1] & FW_PHY_INFO_ID_LO_MASK;
481 hw->phy.id |= phy_id_lo & IXGBE_PHY_REVISION_MASK;
482 hw->phy.revision = phy_id_lo & ~IXGBE_PHY_REVISION_MASK;
483 if (!hw->phy.id || hw->phy.id == IXGBE_PHY_REVISION_MASK)
484 return IXGBE_ERR_PHY_ADDR_INVALID;
485 return IXGBE_SUCCESS;
486 }
487
488 /**
489 * ixgbe_identify_phy_fw - Get PHY type based on firmware command
490 * @hw: pointer to hardware structure
491 *
492 * Returns error code
493 */
494 static s32 ixgbe_identify_phy_fw(struct ixgbe_hw *hw)
495 {
496 if (hw->bus.lan_id)
497 hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
498 else
499 hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
500
501 hw->phy.type = ixgbe_phy_fw;
502 hw->phy.ops.read_reg = NULL;
503 hw->phy.ops.write_reg = NULL;
504 return ixgbe_get_phy_id_fw(hw);
505 }
506
507 /**
508 * ixgbe_shutdown_fw_phy - Shutdown a firmware-controlled PHY
509 * @hw: pointer to hardware structure
510 *
511 * Returns error code
512 */
513 s32 ixgbe_shutdown_fw_phy(struct ixgbe_hw *hw)
514 {
515 u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 };
516
517 setup[0] = FW_PHY_ACT_FORCE_LINK_DOWN_OFF;
518 return ixgbe_fw_phy_activity(hw, FW_PHY_ACT_FORCE_LINK_DOWN, &setup);
519 }
520
521 static s32 ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
522 u32 device_type, u16 *phy_data)
523 {
524 UNREFERENCED_4PARAMETER(*hw, reg_addr, device_type, *phy_data);
525 return IXGBE_NOT_IMPLEMENTED;
526 }
527
528 static s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
529 u32 device_type, u16 phy_data)
530 {
531 UNREFERENCED_4PARAMETER(*hw, reg_addr, device_type, phy_data);
532 return IXGBE_NOT_IMPLEMENTED;
533 }
534
535 /**
536 * ixgbe_read_i2c_combined_generic - Perform I2C read combined operation
537 * @hw: pointer to the hardware structure
538 * @addr: I2C bus address to read from
539 * @reg: I2C device register to read from
540 * @val: pointer to location to receive read value
541 *
542 * Returns an error code on error.
543 **/
544 static s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
545 u16 reg, u16 *val)
546 {
547 return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, TRUE);
548 }
549
550 /**
551 * ixgbe_read_i2c_combined_generic_unlocked - Do I2C read combined operation
552 * @hw: pointer to the hardware structure
553 * @addr: I2C bus address to read from
554 * @reg: I2C device register to read from
555 * @val: pointer to location to receive read value
556 *
557 * Returns an error code on error.
558 **/
559 static s32
560 ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr,
561 u16 reg, u16 *val)
562 {
563 return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, FALSE);
564 }
565
566 /**
567 * ixgbe_write_i2c_combined_generic - Perform I2C write combined operation
568 * @hw: pointer to the hardware structure
569 * @addr: I2C bus address to write to
570 * @reg: I2C device register to write to
571 * @val: value to write
572 *
573 * Returns an error code on error.
574 **/
575 static s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw,
576 u8 addr, u16 reg, u16 val)
577 {
578 return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, TRUE);
579 }
580
581 /**
582 * ixgbe_write_i2c_combined_generic_unlocked - Do I2C write combined operation
583 * @hw: pointer to the hardware structure
584 * @addr: I2C bus address to write to
585 * @reg: I2C device register to write to
586 * @val: value to write
587 *
588 * Returns an error code on error.
589 **/
590 static s32
591 ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw,
592 u8 addr, u16 reg, u16 val)
593 {
594 return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, FALSE);
595 }
596
597 /**
598 * ixgbe_init_ops_X550EM - Inits func ptrs and MAC type
599 * @hw: pointer to hardware structure
600 *
601 * Initialize the function pointers and assign the MAC type for X550EM.
602 * Does not touch the hardware.
603 **/
604 s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw)
605 {
606 struct ixgbe_mac_info *mac = &hw->mac;
607 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
608 struct ixgbe_phy_info *phy = &hw->phy;
609 s32 ret_val;
610
611 DEBUGFUNC("ixgbe_init_ops_X550EM");
612
613 /* Similar to X550 so start there. */
614 ret_val = ixgbe_init_ops_X550(hw);
615
616 /* Since this function eventually calls
617 * ixgbe_init_ops_X540 by design, we are setting
618 * the pointers to NULL explicitly here to overwrite
619 * the values being set in the x540 function.
620 */
621
622 /* Bypass not supported in x550EM */
623 mac->ops.bypass_rw = NULL;
624 mac->ops.bypass_valid_rd = NULL;
625 mac->ops.bypass_set = NULL;
626 mac->ops.bypass_rd_eep = NULL;
627
628 /* FCOE not supported in x550EM */
629 mac->ops.get_san_mac_addr = NULL;
630 mac->ops.set_san_mac_addr = NULL;
631 mac->ops.get_wwn_prefix = NULL;
632 mac->ops.get_fcoe_boot_status = NULL;
633
634 /* IPsec not supported in x550EM */
635 mac->ops.disable_sec_rx_path = NULL;
636 mac->ops.enable_sec_rx_path = NULL;
637
638 /* AUTOC register is not present in x550EM. */
639 mac->ops.prot_autoc_read = NULL;
640 mac->ops.prot_autoc_write = NULL;
641
642 /* X550EM bus type is internal */
643 hw->bus.type = ixgbe_bus_type_internal;
644 mac->ops.get_bus_info = ixgbe_get_bus_info_X550em;
645
646
647 mac->ops.get_media_type = ixgbe_get_media_type_X550em;
648 mac->ops.setup_sfp = ixgbe_setup_sfp_modules_X550em;
649 mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_X550em;
650 mac->ops.reset_hw = ixgbe_reset_hw_X550em;
651 mac->ops.get_supported_physical_layer =
652 ixgbe_get_supported_physical_layer_X550em;
653
654 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper)
655 mac->ops.setup_fc = ixgbe_setup_fc_generic;
656 else
657 mac->ops.setup_fc = ixgbe_setup_fc_X550em;
658
659 /* PHY */
660 phy->ops.init = ixgbe_init_phy_ops_X550em;
661 switch (hw->device_id) {
662 case IXGBE_DEV_ID_X550EM_A_1G_T:
663 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
664 mac->ops.setup_fc = NULL;
665 phy->ops.identify = ixgbe_identify_phy_fw;
666 phy->ops.set_phy_power = NULL;
667 phy->ops.get_firmware_version = NULL;
668 break;
669 case IXGBE_DEV_ID_X550EM_X_1G_T:
670 mac->ops.setup_fc = NULL;
671 phy->ops.identify = ixgbe_identify_phy_x550em;
672 phy->ops.set_phy_power = NULL;
673 break;
674 default:
675 phy->ops.identify = ixgbe_identify_phy_x550em;
676 }
677
678 if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper)
679 phy->ops.set_phy_power = NULL;
680
681
682 /* EEPROM */
683 eeprom->ops.init_params = ixgbe_init_eeprom_params_X540;
684 eeprom->ops.read = ixgbe_read_ee_hostif_X550;
685 eeprom->ops.read_buffer = ixgbe_read_ee_hostif_buffer_X550;
686 eeprom->ops.write = ixgbe_write_ee_hostif_X550;
687 eeprom->ops.write_buffer = ixgbe_write_ee_hostif_buffer_X550;
688 eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X550;
689 eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X550;
690 eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X550;
691
692 return ret_val;
693 }
694
695 /**
696 * ixgbe_setup_fw_link - Setup firmware-controlled PHYs
697 * @hw: pointer to hardware structure
698 */
699 static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw)
700 {
701 u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 };
702 s32 rc;
703 u16 i;
704
705 if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
706 return 0;
707
708 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
709 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
710 "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
711 return IXGBE_ERR_INVALID_LINK_SETTINGS;
712 }
713
714 switch (hw->fc.requested_mode) {
715 case ixgbe_fc_full:
716 setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX <<
717 FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
718 break;
719 case ixgbe_fc_rx_pause:
720 setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RX <<
721 FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
722 break;
723 case ixgbe_fc_tx_pause:
724 setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_TX <<
725 FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
726 break;
727 default:
728 break;
729 }
730
731 for (i = 0; i < sizeof(ixgbe_fw_map) / sizeof(ixgbe_fw_map[0]); ++i) {
732 if (hw->phy.autoneg_advertised & ixgbe_fw_map[i].phy_speed)
733 setup[0] |= ixgbe_fw_map[i].fw_speed;
734 }
735 setup[0] |= FW_PHY_ACT_SETUP_LINK_HP | FW_PHY_ACT_SETUP_LINK_AN;
736
737 if (hw->phy.eee_speeds_advertised)
738 setup[0] |= FW_PHY_ACT_SETUP_LINK_EEE;
739
740 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_SETUP_LINK, &setup);
741 if (rc)
742 return rc;
743 if (setup[0] == FW_PHY_ACT_SETUP_LINK_RSP_DOWN)
744 return IXGBE_ERR_OVERTEMP;
745 return IXGBE_SUCCESS;
746 }
747
748 /**
749 * ixgbe_fc_autoneg_fw - Set up flow control for FW-controlled PHYs
750 * @hw: pointer to hardware structure
751 *
752 * Called at init time to set up flow control.
753 */
754 static s32 ixgbe_fc_autoneg_fw(struct ixgbe_hw *hw)
755 {
756 if (hw->fc.requested_mode == ixgbe_fc_default)
757 hw->fc.requested_mode = ixgbe_fc_full;
758
759 return ixgbe_setup_fw_link(hw);
760 }
761
762 /**
763 * ixgbe_setup_eee_fw - Enable/disable EEE support
764 * @hw: pointer to the HW structure
765 * @enable_eee: boolean flag to enable EEE
766 *
767 * Enable/disable EEE based on enable_eee flag.
768 * This function controls EEE for firmware-based PHY implementations.
769 */
770 static s32 ixgbe_setup_eee_fw(struct ixgbe_hw *hw, bool enable_eee)
771 {
772 if (!!hw->phy.eee_speeds_advertised == enable_eee)
773 return IXGBE_SUCCESS;
774 if (enable_eee)
775 hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported;
776 else
777 hw->phy.eee_speeds_advertised = 0;
778 return hw->phy.ops.setup_link(hw);
779 }
780
781 /**
782 * ixgbe_init_ops_X550EM_a - Inits func ptrs and MAC type
783 * @hw: pointer to hardware structure
784 *
785 * Initialize the function pointers and assign the MAC type for X550EM_a.
786 * Does not touch the hardware.
787 **/
788 s32 ixgbe_init_ops_X550EM_a(struct ixgbe_hw *hw)
789 {
790 struct ixgbe_mac_info *mac = &hw->mac;
791 s32 ret_val;
792
793 DEBUGFUNC("ixgbe_init_ops_X550EM_a");
794
795 /* Start with generic X550EM init */
796 ret_val = ixgbe_init_ops_X550EM(hw);
797
798 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII ||
799 hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L) {
800 mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550;
801 mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550;
802 } else {
803 mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550a;
804 mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a;
805 }
806 mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X550a;
807 mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X550a;
808
809 switch (mac->ops.get_media_type(hw)) {
810 case ixgbe_media_type_fiber:
811 mac->ops.setup_fc = NULL;
812 mac->ops.fc_autoneg = ixgbe_fc_autoneg_fiber_x550em_a;
813 break;
814 case ixgbe_media_type_backplane:
815 mac->ops.fc_autoneg = ixgbe_fc_autoneg_backplane_x550em_a;
816 mac->ops.setup_fc = ixgbe_setup_fc_backplane_x550em_a;
817 break;
818 default:
819 break;
820 }
821
822 switch (hw->device_id) {
823 case IXGBE_DEV_ID_X550EM_A_1G_T:
824 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
825 mac->ops.fc_autoneg = ixgbe_fc_autoneg_sgmii_x550em_a;
826 mac->ops.setup_fc = ixgbe_fc_autoneg_fw;
827 mac->ops.setup_eee = ixgbe_setup_eee_fw;
828 hw->phy.eee_speeds_supported = IXGBE_LINK_SPEED_100_FULL |
829 IXGBE_LINK_SPEED_1GB_FULL;
830 hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported;
831 break;
832 default:
833 break;
834 }
835
836 return ret_val;
837 }
838
839 /**
840 * ixgbe_init_ops_X550EM_x - Inits func ptrs and MAC type
841 * @hw: pointer to hardware structure
842 *
843 * Initialize the function pointers and assign the MAC type for X550EM_x.
844 * Does not touch the hardware.
845 **/
846 s32 ixgbe_init_ops_X550EM_x(struct ixgbe_hw *hw)
847 {
848 struct ixgbe_mac_info *mac = &hw->mac;
849 struct ixgbe_link_info *link = &hw->link;
850 s32 ret_val;
851
852 DEBUGFUNC("ixgbe_init_ops_X550EM_x");
853
854 /* Start with generic X550EM init */
855 ret_val = ixgbe_init_ops_X550EM(hw);
856
857 mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550;
858 mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550;
859 mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X550em;
860 mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X550em;
861 link->ops.read_link = ixgbe_read_i2c_combined_generic;
862 link->ops.read_link_unlocked = ixgbe_read_i2c_combined_generic_unlocked;
863 link->ops.write_link = ixgbe_write_i2c_combined_generic;
864 link->ops.write_link_unlocked =
865 ixgbe_write_i2c_combined_generic_unlocked;
866 link->addr = IXGBE_CS4227;
867
868 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T) {
869 mac->ops.setup_fc = NULL;
870 mac->ops.setup_eee = NULL;
871 mac->ops.init_led_link_act = NULL;
872 }
873
874 return ret_val;
875 }
876
877 /**
878 * ixgbe_dmac_config_X550
879 * @hw: pointer to hardware structure
880 *
881 * Configure DMA coalescing. When enabling DMA coalescing, it is activated
882 * here; when disabling, the DMAC enable bit is cleared.
883 **/
884 s32 ixgbe_dmac_config_X550(struct ixgbe_hw *hw)
885 {
886 u32 reg, high_pri_tc;
887
888 DEBUGFUNC("ixgbe_dmac_config_X550");
889
890 /* Disable DMA coalescing before configuring */
891 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
892 reg &= ~IXGBE_DMACR_DMAC_EN;
893 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
894
895 /* Disable DMA Coalescing if the watchdog timer is 0 */
896 if (!hw->mac.dmac_config.watchdog_timer)
897 goto out;
898
899 ixgbe_dmac_config_tcs_X550(hw);
900
901 /* Configure DMA Coalescing Control Register */
902 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
903
904 /* Set the watchdog timer in units of 40.96 usec */
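	/* usec / 40.96 == usec * 100 / 4096 (watchdog_timer is assumed to be in usec) */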
905 reg &= ~IXGBE_DMACR_DMACWT_MASK;
906 reg |= (hw->mac.dmac_config.watchdog_timer * 100) / 4096;
907
908 reg &= ~IXGBE_DMACR_HIGH_PRI_TC_MASK;
909 /* If fcoe is enabled, set high priority traffic class */
910 if (hw->mac.dmac_config.fcoe_en) {
911 high_pri_tc = 1 << hw->mac.dmac_config.fcoe_tc;
912 reg |= ((high_pri_tc << IXGBE_DMACR_HIGH_PRI_TC_SHIFT) &
913 IXGBE_DMACR_HIGH_PRI_TC_MASK);
914 }
915 reg |= IXGBE_DMACR_EN_MNG_IND;
916
917 /* Enable DMA coalescing after configuration */
918 reg |= IXGBE_DMACR_DMAC_EN;
919 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
920
921 out:
922 return IXGBE_SUCCESS;
923 }
924
925 /**
926 * ixgbe_dmac_config_tcs_X550
927 * @hw: pointer to hardware structure
928 *
929 * Configure DMA coalescing threshold per TC. The dmac enable bit must
930 * be cleared before configuring.
931 **/
932 s32 ixgbe_dmac_config_tcs_X550(struct ixgbe_hw *hw)
933 {
934 u32 tc, reg, pb_headroom, rx_pb_size, maxframe_size_kb;
935
936 DEBUGFUNC("ixgbe_dmac_config_tcs_X550");
937
938 /* Select packet buffer headroom based on the configured link speed */
939 switch (hw->mac.dmac_config.link_speed) {
940 case IXGBE_LINK_SPEED_10_FULL:
941 case IXGBE_LINK_SPEED_100_FULL:
942 pb_headroom = IXGBE_DMACRXT_100M;
943 break;
944 case IXGBE_LINK_SPEED_1GB_FULL:
945 pb_headroom = IXGBE_DMACRXT_1G;
946 break;
947 default:
948 pb_headroom = IXGBE_DMACRXT_10G;
949 break;
950 }
951
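	/* MAXFRS holds the max frame size in bytes; convert to KB so it can be
	 * compared against the Rx packet buffer threshold below.
	 */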
952 maxframe_size_kb = ((IXGBE_READ_REG(hw, IXGBE_MAXFRS) >>
953 IXGBE_MHADD_MFS_SHIFT) / 1024);
954
955 /* Set the per Rx packet buffer receive threshold */
956 for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) {
957 reg = IXGBE_READ_REG(hw, IXGBE_DMCTH(tc));
958 reg &= ~IXGBE_DMCTH_DMACRXT_MASK;
959
960 if (tc < hw->mac.dmac_config.num_tcs) {
961 /* Get Rx PB size */
962 rx_pb_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc));
963 rx_pb_size = (rx_pb_size & IXGBE_RXPBSIZE_MASK) >>
964 IXGBE_RXPBSIZE_SHIFT;
965
966 /* Calculate receive buffer threshold in kilobytes */
967 if (rx_pb_size > pb_headroom)
968 rx_pb_size = rx_pb_size - pb_headroom;
969 else
970 rx_pb_size = 0;
971
972 /* The DMCTH threshold must be at least the max frame size (MFS) */
973 reg |= (rx_pb_size > maxframe_size_kb) ?
974 rx_pb_size : maxframe_size_kb;
975 }
976 IXGBE_WRITE_REG(hw, IXGBE_DMCTH(tc), reg);
977 }
978 return IXGBE_SUCCESS;
979 }
980
981 /**
982 * ixgbe_dmac_update_tcs_X550
983 * @hw: pointer to hardware structure
984 *
985 * Disables dmac, updates per TC settings, and then enables dmac.
986 **/
987 s32 ixgbe_dmac_update_tcs_X550(struct ixgbe_hw *hw)
988 {
989 u32 reg;
990
991 DEBUGFUNC("ixgbe_dmac_update_tcs_X550");
992
993 /* Disable DMA coalescing before configuring */
994 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
995 reg &= ~IXGBE_DMACR_DMAC_EN;
996 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
997
998 ixgbe_dmac_config_tcs_X550(hw);
999
1000 /* Enable DMA coalescing after configuration */
1001 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
1002 reg |= IXGBE_DMACR_DMAC_EN;
1003 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
1004
1005 return IXGBE_SUCCESS;
1006 }
1007
1008 /**
1009 * ixgbe_init_eeprom_params_X550 - Initialize EEPROM params
1010 * @hw: pointer to hardware structure
1011 *
1012 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
1013 * ixgbe_hw struct in order to set up EEPROM access.
1014 **/
1015 s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
1016 {
1017 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
1018 u32 eec;
1019 u16 eeprom_size;
1020
1021 DEBUGFUNC("ixgbe_init_eeprom_params_X550");
1022
1023 if (eeprom->type == ixgbe_eeprom_uninitialized) {
1024 eeprom->semaphore_delay = 10;
1025 eeprom->type = ixgbe_flash;
1026
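		/* The EEC SIZE field encodes the flash size as a power of two;
		 * word_size is expressed in 16-bit words.
		 */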
1027 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1028 eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
1029 IXGBE_EEC_SIZE_SHIFT);
1030 eeprom->word_size = 1 << (eeprom_size +
1031 IXGBE_EEPROM_WORD_SIZE_SHIFT);
1032
1033 DEBUGOUT2("Eeprom params: type = %d, size = %d\n",
1034 eeprom->type, eeprom->word_size);
1035 }
1036
1037 return IXGBE_SUCCESS;
1038 }
1039
1040 /**
1041 * ixgbe_set_source_address_pruning_X550 - Enable/Disable source address pruning
1042 * @hw: pointer to hardware structure
1043 * @enable: enable or disable source address pruning
1044 * @pool: Rx pool to set source address pruning for
1045 **/
1046 void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw, bool enable,
1047 unsigned int pool)
1048 {
1049 u64 pfflp;
1050
1051 /* max rx pool is 63 */
1052 if (pool > 63)
1053 return;
1054
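	/* The per-pool pruning enables form a 64-bit map: PFFLPL covers pools
	 * 0-31 and PFFLPH covers pools 32-63.
	 */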
1055 pfflp = (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPL);
1056 pfflp |= (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPH) << 32;
1057
1058 if (enable)
1059 pfflp |= (1ULL << pool);
1060 else
1061 pfflp &= ~(1ULL << pool);
1062
1063 IXGBE_WRITE_REG(hw, IXGBE_PFFLPL, (u32)pfflp);
1064 IXGBE_WRITE_REG(hw, IXGBE_PFFLPH, (u32)(pfflp >> 32));
1065 }
1066
1067 /**
1068 * ixgbe_set_ethertype_anti_spoofing_X550 - Enable/Disable Ethertype anti-spoofing
1069 * @hw: pointer to hardware structure
1070 * @enable: enable or disable switch for Ethertype anti-spoofing
1071 * @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing
1072 *
1073 **/
1074 void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw,
1075 bool enable, int vf)
1076 {
1077 int vf_target_reg = vf >> 3;
1078 int vf_target_shift = vf % 8 + IXGBE_SPOOF_ETHERTYPEAS_SHIFT;
1079 u32 pfvfspoof;
1080
1081 DEBUGFUNC("ixgbe_set_ethertype_anti_spoofing_X550");
1082
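	/* Each PFVFSPOOF register carries the Ethertype anti-spoof bits for
	 * eight VFs: vf >> 3 selects the register, vf % 8 the bit within the
	 * Ethertype field.
	 */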
1083 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
1084 if (enable)
1085 pfvfspoof |= (1 << vf_target_shift);
1086 else
1087 pfvfspoof &= ~(1 << vf_target_shift);
1088
1089 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
1090 }
1091
1092 /**
1093 * ixgbe_iosf_wait - Wait for IOSF command completion
1094 * @hw: pointer to hardware structure
1095 * @ctrl: pointer to location to receive final IOSF control value
1096 *
1097 * Returns failing status on timeout
1098 *
1099 * Note: ctrl can be NULL if the IOSF control register value is not needed
1100 **/
1101 static s32 ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl)
1102 {
1103 u32 i, command = 0;
1104
1105 /* Check every 10 usec to see if the address cycle completed.
1106 * The SB IOSF BUSY bit will clear when the operation is
1107 * complete
1108 */
1109 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
1110 command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL);
1111 if ((command & IXGBE_SB_IOSF_CTRL_BUSY) == 0)
1112 break;
1113 usec_delay(10);
1114 }
1115 if (ctrl)
1116 *ctrl = command;
1117 if (i == IXGBE_MDIO_COMMAND_TIMEOUT) {
1118 ERROR_REPORT1(IXGBE_ERROR_POLLING, "Wait timed out\n");
1119 return IXGBE_ERR_PHY;
1120 }
1121
1122 return IXGBE_SUCCESS;
1123 }
1124
1125 /**
1126 * ixgbe_write_iosf_sb_reg_x550 - Writes a value to specified register
1127 * of the IOSF device
1128 * @hw: pointer to hardware structure
1129 * @reg_addr: 32 bit PHY register to write
1130 * @device_type: 3 bit device type
1131 * @data: Data to write to the register
1132 **/
1133 s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
1134 u32 device_type, u32 data)
1135 {
1136 u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM;
1137 u32 command, error __unused;
1138 s32 ret;
1139
1140 ret = ixgbe_acquire_swfw_semaphore(hw, gssr);
1141 if (ret != IXGBE_SUCCESS)
1142 return ret;
1143
1144 ret = ixgbe_iosf_wait(hw, NULL);
1145 if (ret != IXGBE_SUCCESS)
1146 goto out;
1147
1148 command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
1149 (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
1150
1151 /* Write IOSF control register */
1152 IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
1153
1154 /* Write IOSF data register */
1155 IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA, data);
1156
1157 ret = ixgbe_iosf_wait(hw, &command);
1158
1159 if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
1160 error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
1161 IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
1162 ERROR_REPORT2(IXGBE_ERROR_POLLING,
1163 "Failed to write, error %x\n", error);
1164 ret = IXGBE_ERR_PHY;
1165 }
1166
1167 out:
1168 ixgbe_release_swfw_semaphore(hw, gssr);
1169 return ret;
1170 }
1171
1172 /**
1173 * ixgbe_read_iosf_sb_reg_x550 - Reads specified register of the IOSF device
1174 * @hw: pointer to hardware structure
1175 * @reg_addr: 32 bit PHY register to read
1176 * @device_type: 3 bit device type
1177 * @data: Pointer to read data from the register
1178 **/
1179 s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
1180 u32 device_type, u32 *data)
1181 {
1182 u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM;
1183 u32 command, error __unused;
1184 s32 ret;
1185
1186 ret = ixgbe_acquire_swfw_semaphore(hw, gssr);
1187 if (ret != IXGBE_SUCCESS)
1188 return ret;
1189
1190 ret = ixgbe_iosf_wait(hw, NULL);
1191 if (ret != IXGBE_SUCCESS)
1192 goto out;
1193
1194 command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
1195 (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
1196
1197 /* Write IOSF control register */
1198 IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
1199
1200 ret = ixgbe_iosf_wait(hw, &command);
1201
1202 if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
1203 error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
1204 IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
1205 ERROR_REPORT2(IXGBE_ERROR_POLLING,
1206 "Failed to read, error %x\n", error);
1207 ret = IXGBE_ERR_PHY;
1208 }
1209
1210 if (ret == IXGBE_SUCCESS)
1211 *data = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA);
1212
1213 out:
1214 ixgbe_release_swfw_semaphore(hw, gssr);
1215 return ret;
1216 }
1217
1218 /**
1219 * ixgbe_get_phy_token - Get the token for shared phy access
1220 * @hw: Pointer to hardware structure
1221 */
1222
1223 s32 ixgbe_get_phy_token(struct ixgbe_hw *hw)
1224 {
1225 struct ixgbe_hic_phy_token_req token_cmd;
1226 s32 status;
1227
1228 token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
1229 token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
1230 token_cmd.hdr.cmd_or_resp.cmd_resv = 0;
1231 token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
1232 token_cmd.port_number = hw->bus.lan_id;
1233 token_cmd.command_type = FW_PHY_TOKEN_REQ;
1234 token_cmd.pad = 0;
1235 status = ixgbe_host_interface_command(hw, (u32 *)&token_cmd,
1236 sizeof(token_cmd),
1237 IXGBE_HI_COMMAND_TIMEOUT,
1238 TRUE);
1239 if (status) {
1240 DEBUGOUT1("Issuing host interface command failed with Status = %d\n",
1241 status);
1242 return status;
1243 }
1244 if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
1245 return IXGBE_SUCCESS;
1246 if (token_cmd.hdr.cmd_or_resp.ret_status != FW_PHY_TOKEN_RETRY) {
1247 DEBUGOUT1("Host interface command returned 0x%08x , returning IXGBE_ERR_FW_RESP_INVALID\n",
1248 token_cmd.hdr.cmd_or_resp.ret_status);
1249 return IXGBE_ERR_FW_RESP_INVALID;
1250 }
1251
1252 DEBUGOUT("Returning IXGBE_ERR_TOKEN_RETRY\n");
1253 return IXGBE_ERR_TOKEN_RETRY;
1254 }
1255
1256 /**
1257 * ixgbe_put_phy_token - Put the token for shared phy access
1258 * @hw: Pointer to hardware structure
1259 */
1260
1261 s32 ixgbe_put_phy_token(struct ixgbe_hw *hw)
1262 {
1263 struct ixgbe_hic_phy_token_req token_cmd;
1264 s32 status;
1265
1266 token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
1267 token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
1268 token_cmd.hdr.cmd_or_resp.cmd_resv = 0;
1269 token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
1270 token_cmd.port_number = hw->bus.lan_id;
1271 token_cmd.command_type = FW_PHY_TOKEN_REL;
1272 token_cmd.pad = 0;
1273 status = ixgbe_host_interface_command(hw, (u32 *)&token_cmd,
1274 sizeof(token_cmd),
1275 IXGBE_HI_COMMAND_TIMEOUT,
1276 TRUE);
1277 if (status)
1278 return status;
1279 if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
1280 return IXGBE_SUCCESS;
1281
1282 DEBUGOUT("Put PHY Token host interface command failed");
1283 return IXGBE_ERR_FW_RESP_INVALID;
1284 }
1285
1286 /**
1287 * ixgbe_write_iosf_sb_reg_x550a - Writes a value to specified register
1288 * of the IOSF device
1289 * @hw: pointer to hardware structure
1290 * @reg_addr: 32 bit PHY register to write
1291 * @device_type: 3 bit device type
1292 * @data: Data to write to the register
1293 **/
1294 s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
1295 u32 device_type, u32 data)
1296 {
1297 struct ixgbe_hic_internal_phy_req write_cmd;
1298 s32 status;
1299 UNREFERENCED_1PARAMETER(device_type);
1300
1301 memset(&write_cmd, 0, sizeof(write_cmd));
1302 write_cmd.hdr.cmd = FW_INT_PHY_REQ_CMD;
1303 write_cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN;
1304 write_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
1305 write_cmd.port_number = hw->bus.lan_id;
1306 write_cmd.command_type = FW_INT_PHY_REQ_WRITE;
1307 write_cmd.address = IXGBE_CPU_TO_BE16(reg_addr);
1308 write_cmd.write_data = IXGBE_CPU_TO_BE32(data);
1309
1310 status = ixgbe_host_interface_command(hw, (u32 *)&write_cmd,
1311 sizeof(write_cmd),
1312 IXGBE_HI_COMMAND_TIMEOUT, FALSE);
1313
1314 return status;
1315 }
1316
1317 /**
1318 * ixgbe_read_iosf_sb_reg_x550a - Reads specified register of the IOSF device
1319 * @hw: pointer to hardware structure
1320 * @reg_addr: 32 bit PHY register to read
1321 * @device_type: 3 bit device type
1322 * @data: Pointer to read data from the register
1323 **/
1324 s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
1325 u32 device_type, u32 *data)
1326 {
1327 union {
1328 struct ixgbe_hic_internal_phy_req cmd;
1329 struct ixgbe_hic_internal_phy_resp rsp;
1330 } hic;
1331 s32 status;
1332 UNREFERENCED_1PARAMETER(device_type);
1333
1334 memset(&hic, 0, sizeof(hic));
1335 hic.cmd.hdr.cmd = FW_INT_PHY_REQ_CMD;
1336 hic.cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN;
1337 hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
1338 hic.cmd.port_number = hw->bus.lan_id;
1339 hic.cmd.command_type = FW_INT_PHY_REQ_READ;
1340 hic.cmd.address = IXGBE_CPU_TO_BE16(reg_addr);
1341
1342 status = ixgbe_host_interface_command(hw, (u32 *)&hic.cmd,
1343 sizeof(hic.cmd),
1344 IXGBE_HI_COMMAND_TIMEOUT, TRUE);
1345
1346 /* Extract the register value from the response. */
1347 *data = IXGBE_BE32_TO_CPU(hic.rsp.read_data);
1348
1349 return status;
1350 }
1351
1352 /**
1353 * ixgbe_disable_mdd_X550
1354 * @hw: pointer to hardware structure
1355 *
1356 * Disable malicious driver detection
1357 **/
1358 void ixgbe_disable_mdd_X550(struct ixgbe_hw *hw)
1359 {
1360 u32 reg;
1361
1362 DEBUGFUNC("ixgbe_disable_mdd_X550");
1363
1364 /* Disable MDD for TX DMA and interrupt */
1365 reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1366 reg &= ~(IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN);
1367 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
1368
1369 /* Disable MDD for RX and interrupt */
1370 reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
1371 reg &= ~(IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN);
1372 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg);
1373 }
1374
1375 /**
1376 * ixgbe_enable_mdd_X550
1377 * @hw: pointer to hardware structure
1378 *
1379 * Enable malicious driver detection
1380 **/
1381 void ixgbe_enable_mdd_X550(struct ixgbe_hw *hw)
1382 {
1383 u32 reg;
1384
1385 DEBUGFUNC("ixgbe_enable_mdd_X550");
1386
1387 /* Enable MDD for TX DMA and interrupt */
1388 reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1389 reg |= (IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN);
1390 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
1391
1392 /* Enable MDD for RX and interrupt */
1393 reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
1394 reg |= (IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN);
1395 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg);
1396 }
1397
1398 /**
1399 * ixgbe_restore_mdd_vf_X550
1400 * @hw: pointer to hardware structure
1401 * @vf: vf index
1402 *
1403 * Restore VF that was disabled during malicious driver detection event
1404 **/
1405 void ixgbe_restore_mdd_vf_X550(struct ixgbe_hw *hw, u32 vf)
1406 {
1407 u32 idx, reg, num_qs, start_q, bitmask;
1408
1409 DEBUGFUNC("ixgbe_restore_mdd_vf_X550");
1410
1411 /* Map VF to queues */
1412 reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
1413 switch (reg & IXGBE_MRQC_MRQE_MASK) {
1414 case IXGBE_MRQC_VMDQRT8TCEN:
1415 num_qs = 8; /* 16 VFs / pools */
1416 bitmask = 0x000000FF;
1417 break;
1418 case IXGBE_MRQC_VMDQRSS32EN:
1419 case IXGBE_MRQC_VMDQRT4TCEN:
1420 num_qs = 4; /* 32 VFs / pools */
1421 bitmask = 0x0000000F;
1422 break;
1423 default: /* 64 VFs / pools */
1424 num_qs = 2;
1425 bitmask = 0x00000003;
1426 break;
1427 }
1428 start_q = vf * num_qs;
1429
1430 /* Release vf's queues by clearing WQBR_TX and WQBR_RX (RW1C) */
1431 idx = start_q / 32;
1432 reg = 0;
1433 reg |= (bitmask << (start_q % 32));
1434 IXGBE_WRITE_REG(hw, IXGBE_WQBR_TX(idx), reg);
1435 IXGBE_WRITE_REG(hw, IXGBE_WQBR_RX(idx), reg);
1436 }
1437
1438 /**
1439 * ixgbe_mdd_event_X550
1440 * @hw: pointer to hardware structure
1441 * @vf_bitmap: vf bitmap of malicious vfs
1442 *
1443 * Handle malicious driver detection event.
1444 **/
1445 void ixgbe_mdd_event_X550(struct ixgbe_hw *hw, u32 *vf_bitmap)
1446 {
1447 u32 wqbr;
1448 u32 i, j, reg, q, shift, vf, idx;
1449
1450 DEBUGFUNC("ixgbe_mdd_event_X550");
1451
1452 /* figure out pool size for mapping to vf's */
1453 reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
1454 switch (reg & IXGBE_MRQC_MRQE_MASK) {
1455 case IXGBE_MRQC_VMDQRT8TCEN:
1456 shift = 3; /* 16 VFs / pools */
1457 break;
1458 case IXGBE_MRQC_VMDQRSS32EN:
1459 case IXGBE_MRQC_VMDQRT4TCEN:
1460 shift = 2; /* 32 VFs / pools */
1461 break;
1462 default:
1463 shift = 1; /* 64 VFs / pools */
1464 break;
1465 }
1466
1467 /* Read WQBR_TX and WQBR_RX and check for malicious queues */
1468 for (i = 0; i < 4; i++) {
1469 wqbr = IXGBE_READ_REG(hw, IXGBE_WQBR_TX(i));
1470 wqbr |= IXGBE_READ_REG(hw, IXGBE_WQBR_RX(i));
1471
1472 if (!wqbr)
1473 continue;
1474
1475 /* Get malicious queue */
1476 for (j = 0; j < 32 && wqbr; j++) {
1477
1478 if (!(wqbr & (1 << j)))
1479 continue;
1480
1481 /* Get queue from bitmask */
1482 q = j + (i * 32);
1483
1484 /* Map queue to vf */
1485 vf = (q >> shift);
1486
1487 /* Set vf bit in vf_bitmap */
1488 idx = vf / 32;
1489 vf_bitmap[idx] |= (1 << (vf % 32));
1490 wqbr &= ~(1 << j);
1491 }
1492 }
1493 }
1494
1495 /**
1496 * ixgbe_get_media_type_X550em - Get media type
1497 * @hw: pointer to hardware structure
1498 *
1499 * Returns the media type (fiber, copper, backplane)
1500 */
1501 enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
1502 {
1503 enum ixgbe_media_type media_type;
1504
1505 DEBUGFUNC("ixgbe_get_media_type_X550em");
1506
1507 /* Detect if there is a copper PHY attached. */
1508 switch (hw->device_id) {
1509 case IXGBE_DEV_ID_X550EM_X_KR:
1510 case IXGBE_DEV_ID_X550EM_X_KX4:
1511 case IXGBE_DEV_ID_X550EM_X_XFI:
1512 case IXGBE_DEV_ID_X550EM_A_KR:
1513 case IXGBE_DEV_ID_X550EM_A_KR_L:
1514 media_type = ixgbe_media_type_backplane;
1515 break;
1516 case IXGBE_DEV_ID_X550EM_X_SFP:
1517 case IXGBE_DEV_ID_X550EM_A_SFP:
1518 case IXGBE_DEV_ID_X550EM_A_SFP_N:
1519 case IXGBE_DEV_ID_X550EM_A_QSFP:
1520 case IXGBE_DEV_ID_X550EM_A_QSFP_N:
1521 media_type = ixgbe_media_type_fiber;
1522 break;
1523 case IXGBE_DEV_ID_X550EM_X_1G_T:
1524 case IXGBE_DEV_ID_X550EM_X_10G_T:
1525 case IXGBE_DEV_ID_X550EM_A_10G_T:
1526 media_type = ixgbe_media_type_copper;
1527 break;
1528 case IXGBE_DEV_ID_X550EM_A_SGMII:
1529 case IXGBE_DEV_ID_X550EM_A_SGMII_L:
1530 media_type = ixgbe_media_type_backplane;
1531 hw->phy.type = ixgbe_phy_sgmii;
1532 break;
1533 case IXGBE_DEV_ID_X550EM_A_1G_T:
1534 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
1535 media_type = ixgbe_media_type_copper;
1536 break;
1537 default:
1538 media_type = ixgbe_media_type_unknown;
1539 break;
1540 }
1541 return media_type;
1542 }
1543
1544 /**
1545 * ixgbe_supported_sfp_modules_X550em - Check if SFP module type is supported
1546 * @hw: pointer to hardware structure
1547 * @linear: TRUE if SFP module is linear
1548 */
1549 static s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear)
1550 {
1551 DEBUGFUNC("ixgbe_supported_sfp_modules_X550em");
1552
1553 switch (hw->phy.sfp_type) {
1554 case ixgbe_sfp_type_not_present:
1555 return IXGBE_ERR_SFP_NOT_PRESENT;
1556 case ixgbe_sfp_type_da_cu_core0:
1557 case ixgbe_sfp_type_da_cu_core1:
1558 *linear = TRUE;
1559 break;
1560 case ixgbe_sfp_type_srlr_core0:
1561 case ixgbe_sfp_type_srlr_core1:
1562 case ixgbe_sfp_type_da_act_lmt_core0:
1563 case ixgbe_sfp_type_da_act_lmt_core1:
1564 case ixgbe_sfp_type_1g_sx_core0:
1565 case ixgbe_sfp_type_1g_sx_core1:
1566 case ixgbe_sfp_type_1g_lx_core0:
1567 case ixgbe_sfp_type_1g_lx_core1:
1568 *linear = FALSE;
1569 break;
1570 case ixgbe_sfp_type_unknown:
1571 case ixgbe_sfp_type_1g_cu_core0:
1572 case ixgbe_sfp_type_1g_cu_core1:
1573 default:
1574 return IXGBE_ERR_SFP_NOT_SUPPORTED;
1575 }
1576
1577 return IXGBE_SUCCESS;
1578 }
1579
1580 /**
1581 * ixgbe_identify_sfp_module_X550em - Identifies SFP modules
1582 * @hw: pointer to hardware structure
1583 *
1584 * Searches for and identifies the SFP module and assigns appropriate PHY type.
1585 **/
1586 s32 ixgbe_identify_sfp_module_X550em(struct ixgbe_hw *hw)
1587 {
1588 s32 status;
1589 bool linear;
1590
1591 DEBUGFUNC("ixgbe_identify_sfp_module_X550em");
1592
1593 status = ixgbe_identify_module_generic(hw);
1594
1595 if (status != IXGBE_SUCCESS)
1596 return status;
1597
1598 /* Check if SFP module is supported */
1599 status = ixgbe_supported_sfp_modules_X550em(hw, &linear);
1600
1601 return status;
1602 }
1603
1604 /**
1605 * ixgbe_setup_sfp_modules_X550em - Setup MAC link ops
1606 * @hw: pointer to hardware structure
1607 */
1608 s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw)
1609 {
1610 s32 status;
1611 bool linear;
1612
1613 DEBUGFUNC("ixgbe_setup_sfp_modules_X550em");
1614
1615 /* Check if SFP module is supported */
1616 status = ixgbe_supported_sfp_modules_X550em(hw, &linear);
1617
1618 if (status != IXGBE_SUCCESS)
1619 return status;
1620
1621 ixgbe_init_mac_link_ops_X550em(hw);
1622 hw->phy.ops.reset = NULL;
1623
1624 return IXGBE_SUCCESS;
1625 }
1626
1627 /**
1628 * ixgbe_restart_an_internal_phy_x550em - restart autonegotiation for the
1629 * internal PHY
1630 * @hw: pointer to hardware structure
1631 **/
1632 static s32 ixgbe_restart_an_internal_phy_x550em(struct ixgbe_hw *hw)
1633 {
1634 s32 status;
1635 u32 link_ctrl;
1636
1637 /* Restart auto-negotiation. */
1638 status = hw->mac.ops.read_iosf_sb_reg(hw,
1639 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1640 IXGBE_SB_IOSF_TARGET_KR_PHY, &link_ctrl);
1641
1642 if (status) {
1643 DEBUGOUT("Auto-negotiation did not complete\n");
1644 return status;
1645 }
1646
1647 link_ctrl |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
1648 status = hw->mac.ops.write_iosf_sb_reg(hw,
1649 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1650 IXGBE_SB_IOSF_TARGET_KR_PHY, link_ctrl);
1651
1652 if (hw->mac.type == ixgbe_mac_X550EM_a) {
1653 u32 flx_mask_st20;
1654
1655 /* Indicate to FW that AN restart has been asserted */
1656 status = hw->mac.ops.read_iosf_sb_reg(hw,
1657 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1658 IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_mask_st20);
1659
1660 if (status) {
1661 DEBUGOUT("Auto-negotiation did not complete\n");
1662 return status;
1663 }
1664
1665 flx_mask_st20 |= IXGBE_KRM_PMD_FLX_MASK_ST20_FW_AN_RESTART;
1666 status = hw->mac.ops.write_iosf_sb_reg(hw,
1667 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1668 IXGBE_SB_IOSF_TARGET_KR_PHY, flx_mask_st20);
1669 }
1670
1671 return status;
1672 }
1673
1674 /**
1675 * ixgbe_setup_sgmii - Set up link for sgmii
1676 * @hw: pointer to hardware structure
1677 * @speed: new link speed
1678 * @autoneg_wait: TRUE when waiting for completion is needed
1679 */
1680 static s32 ixgbe_setup_sgmii(struct ixgbe_hw *hw, ixgbe_link_speed speed,
1681 bool autoneg_wait)
1682 {
1683 struct ixgbe_mac_info *mac = &hw->mac;
1684 u32 lval, sval, flx_val;
1685 s32 rc;
1686
1687 rc = mac->ops.read_iosf_sb_reg(hw,
1688 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1689 IXGBE_SB_IOSF_TARGET_KR_PHY, &lval);
1690 if (rc)
1691 return rc;
1692
1693 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
1694 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
1695 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN;
1696 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN;
1697 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
1698 rc = mac->ops.write_iosf_sb_reg(hw,
1699 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1700 IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
1701 if (rc)
1702 return rc;
1703
1704 rc = mac->ops.read_iosf_sb_reg(hw,
1705 IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
1706 IXGBE_SB_IOSF_TARGET_KR_PHY, &sval);
1707 if (rc)
1708 return rc;
1709
1710 sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D;
1711 sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D;
1712 rc = mac->ops.write_iosf_sb_reg(hw,
1713 IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
1714 IXGBE_SB_IOSF_TARGET_KR_PHY, sval);
1715 if (rc)
1716 return rc;
1717
1718 rc = mac->ops.read_iosf_sb_reg(hw,
1719 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1720 IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val);
1721 if (rc)
1722 return rc;
1723
1724 flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
1725 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G;
1726 flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
1727 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
1728 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
1729
1730 rc = mac->ops.write_iosf_sb_reg(hw,
1731 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1732 IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val);
1733 if (rc)
1734 return rc;
1735
1736 rc = ixgbe_restart_an_internal_phy_x550em(hw);
1737 if (rc)
1738 return rc;
1739
1740 return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
1741 }
1742
1743 /**
1744 * ixgbe_setup_sgmii_fw - Set up link for internal PHY SGMII auto-negotiation
1745 * @hw: pointer to hardware structure
1746 * @speed: new link speed
1747 * @autoneg_wait: TRUE when waiting for completion is needed
1748 */
1749 static s32 ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed,
1750 bool autoneg_wait)
1751 {
1752 struct ixgbe_mac_info *mac = &hw->mac;
1753 u32 lval, sval, flx_val;
1754 s32 rc;
1755
1756 rc = mac->ops.read_iosf_sb_reg(hw,
1757 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1758 IXGBE_SB_IOSF_TARGET_KR_PHY, &lval);
1759 if (rc)
1760 return rc;
1761
1762 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
1763 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
1764 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN;
1765 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN;
1766 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
1767 rc = mac->ops.write_iosf_sb_reg(hw,
1768 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1769 IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
1770 if (rc)
1771 return rc;
1772
1773 rc = mac->ops.read_iosf_sb_reg(hw,
1774 IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
1775 IXGBE_SB_IOSF_TARGET_KR_PHY, &sval);
1776 if (rc)
1777 return rc;
1778
1779 sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D;
1780 sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D;
1781 rc = mac->ops.write_iosf_sb_reg(hw,
1782 IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
1783 IXGBE_SB_IOSF_TARGET_KR_PHY, sval);
1784 if (rc)
1785 return rc;
1786
1787 rc = mac->ops.write_iosf_sb_reg(hw,
1788 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1789 IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
1790 if (rc)
1791 return rc;
1792
1793 rc = mac->ops.read_iosf_sb_reg(hw,
1794 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1795 IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val);
1796 if (rc)
1797 return rc;
1798
1799 flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
1800 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN;
1801 flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
1802 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
1803 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
1804
1805 rc = mac->ops.write_iosf_sb_reg(hw,
1806 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1807 IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val);
1808 if (rc)
1809 return rc;
1810
1811 rc = ixgbe_restart_an_internal_phy_x550em(hw);
1812
1813 return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
1814 }
1815
1816 /**
1817 * ixgbe_init_mac_link_ops_X550em - init mac link function pointers
1818 * @hw: pointer to hardware structure
1819 */
1820 void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
1821 {
1822 struct ixgbe_mac_info *mac = &hw->mac;
1823
1824 DEBUGFUNC("ixgbe_init_mac_link_ops_X550em");
1825
1826 switch (hw->mac.ops.get_media_type(hw)) {
1827 case ixgbe_media_type_fiber:
1828 /* CS4227 does not support autoneg, so disable the laser control
1829 * functions for SFP+ fiber
1830 */
1831 mac->ops.disable_tx_laser = NULL;
1832 mac->ops.enable_tx_laser = NULL;
1833 mac->ops.flap_tx_laser = NULL;
1834 mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber;
1835 mac->ops.set_rate_select_speed =
1836 ixgbe_set_soft_rate_select_speed;
1837
1838 if ((hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) ||
1839 (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP))
1840 mac->ops.setup_mac_link =
1841 ixgbe_setup_mac_link_sfp_x550a;
1842 else
1843 mac->ops.setup_mac_link =
1844 ixgbe_setup_mac_link_sfp_x550em;
1845 break;
1846 case ixgbe_media_type_copper:
1847 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T)
1848 break;
1849 if (hw->mac.type == ixgbe_mac_X550EM_a) {
1850 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
1851 hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) {
1852 mac->ops.setup_link = ixgbe_setup_sgmii_fw;
1853 mac->ops.check_link =
1854 ixgbe_check_mac_link_generic;
1855 } else {
1856 mac->ops.setup_link =
1857 ixgbe_setup_mac_link_t_X550em;
1858 }
1859 } else {
1860 mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em;
1861 mac->ops.check_link = ixgbe_check_link_t_X550em;
1862 }
1863 break;
1864 case ixgbe_media_type_backplane:
1865 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII ||
1866 hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L)
1867 mac->ops.setup_link = ixgbe_setup_sgmii;
1868 break;
1869 default:
1870 break;
1871 }
1872 }
1873
1874 /**
1875 * ixgbe_get_link_capabilities_X550em - Determines link capabilities
1876 * @hw: pointer to hardware structure
1877 * @speed: pointer to link speed
1878 * @autoneg: TRUE when autoneg or autotry is enabled
1879 */
1880 s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
1881 ixgbe_link_speed *speed,
1882 bool *autoneg)
1883 {
1884 DEBUGFUNC("ixgbe_get_link_capabilities_X550em");
1885
1886
1887 if (hw->phy.type == ixgbe_phy_fw) {
1888 *autoneg = TRUE;
1889 *speed = hw->phy.speeds_supported;
1890 return 0;
1891 }
1892
1893 /* SFP */
1894 if (hw->phy.media_type == ixgbe_media_type_fiber) {
1895
1896 /* CS4227 SFP must not enable auto-negotiation */
1897 *autoneg = FALSE;
1898
1899 /* Check if 1G SFP module. */
1900 if (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
1901 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1
1902 || hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
1903 hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1) {
1904 *speed = IXGBE_LINK_SPEED_1GB_FULL;
1905 return IXGBE_SUCCESS;
1906 }
1907
1908 /* Link capabilities are based on SFP */
1909 if (hw->phy.multispeed_fiber)
1910 *speed = IXGBE_LINK_SPEED_10GB_FULL |
1911 IXGBE_LINK_SPEED_1GB_FULL;
1912 else
1913 *speed = IXGBE_LINK_SPEED_10GB_FULL;
1914 } else {
1915 switch (hw->phy.type) {
1916 case ixgbe_phy_ext_1g_t:
1917 case ixgbe_phy_sgmii:
1918 *speed = IXGBE_LINK_SPEED_1GB_FULL;
1919 break;
1920 case ixgbe_phy_x550em_kr:
1921 if (hw->mac.type == ixgbe_mac_X550EM_a) {
1922 /* check different backplane modes */
1923 if (hw->phy.nw_mng_if_sel &
1924 IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G) {
1925 *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
1926 break;
1927 } else if (hw->device_id ==
1928 IXGBE_DEV_ID_X550EM_A_KR_L) {
1929 *speed = IXGBE_LINK_SPEED_1GB_FULL;
1930 break;
1931 }
1932 }
1933 /* fall through */
1934 default:
1935 *speed = IXGBE_LINK_SPEED_10GB_FULL |
1936 IXGBE_LINK_SPEED_1GB_FULL;
1937 break;
1938 }
1939 *autoneg = TRUE;
1940 }
1941
1942 return IXGBE_SUCCESS;
1943 }
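
/* Usage sketch (illustrative only, not part of the driver): a caller can
 * query the advertisable speeds before configuring the link; the local
 * variables below are hypothetical.
 *
 *	ixgbe_link_speed speeds;
 *	bool autoneg;
 *	s32 err;
 *
 *	err = ixgbe_get_link_capabilities_X550em(hw, &speeds, &autoneg);
 *	if (err == IXGBE_SUCCESS && (speeds & IXGBE_LINK_SPEED_10GB_FULL))
 *		DEBUGOUT("10G may be advertised on this interface\n");
 */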
1944
1945 /**
1946 * ixgbe_get_lasi_ext_t_x550em - Determine external Base T PHY interrupt cause
1947 * @hw: pointer to hardware structure
1948 * @lsc: pointer to boolean flag which indicates whether external Base T
1949 * PHY interrupt is lsc
1950 *
1951 * Determine whether the external Base T PHY interrupt cause is a high temperature
1952 * failure alarm or link status change.
1953 *
1954 * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature
1955 * failure alarm, else return PHY access status.
1956 */
1957 static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc)
1958 {
1959 u32 status;
1960 u16 reg;
1961
1962 *lsc = FALSE;
1963
1964 /* Vendor alarm triggered */
1965 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG,
1966 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
1967 &reg);
1968
1969 if (status != IXGBE_SUCCESS ||
1970 !(reg & IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN))
1971 return status;
1972
1973 /* Vendor Auto-Neg alarm triggered or Global alarm 1 triggered */
1974 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_FLAG,
1975 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
1976 &reg);
1977
1978 if (status != IXGBE_SUCCESS ||
1979 !(reg & (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN |
1980 IXGBE_MDIO_GLOBAL_ALARM_1_INT)))
1981 return status;
1982
1983 /* Global alarm triggered */
1984 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_ALARM_1,
1985 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
1986 &reg);
1987
1988 if (status != IXGBE_SUCCESS)
1989 return status;
1990
1991 /* If high temperature failure, then return over temp error and exit */
1992 if (reg & IXGBE_MDIO_GLOBAL_ALM_1_HI_TMP_FAIL) {
1993 /* power down the PHY in case the PHY FW didn't already */
1994 ixgbe_set_copper_phy_power(hw, FALSE);
1995 return IXGBE_ERR_OVERTEMP;
1996 } else if (reg & IXGBE_MDIO_GLOBAL_ALM_1_DEV_FAULT) {
1997 /* device fault alarm triggered */
1998 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_FAULT_MSG,
1999 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2000 &reg);
2001
2002 if (status != IXGBE_SUCCESS)
2003 return status;
2004
2005 /* if the device fault was due to a high temp alarm, handle it and exit */
2006 if (reg == IXGBE_MDIO_GLOBAL_FAULT_MSG_HI_TMP) {
2007 /* power down the PHY in case the PHY FW didn't */
2008 ixgbe_set_copper_phy_power(hw, FALSE);
2009 return IXGBE_ERR_OVERTEMP;
2010 }
2011 }
2012
2013 /* Vendor alarm 2 triggered */
2014 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG,
2015 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
2016
2017 if (status != IXGBE_SUCCESS ||
2018 !(reg & IXGBE_MDIO_GLOBAL_STD_ALM2_INT))
2019 return status;
2020
2021 /* link connect/disconnect event occurred */
2022 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM2,
2023 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
2024
2025 if (status != IXGBE_SUCCESS)
2026 return status;
2027
2028 /* Indicate LSC */
2029 if (reg & IXGBE_MDIO_AUTO_NEG_VEN_LSC)
2030 *lsc = TRUE;
2031
2032 return IXGBE_SUCCESS;
2033 }
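
/* Usage sketch (illustrative only): a link-status interrupt handler would
 * typically call this helper, treat IXGBE_ERR_OVERTEMP as fatal, and only
 * reconfigure the internal link when the cause really was a link status
 * change.  Error handling is elided.
 *
 *	bool lsc;
 *	s32 err;
 *
 *	err = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
 *	if (err == IXGBE_ERR_OVERTEMP)
 *		return err;
 *	if (err == IXGBE_SUCCESS && lsc)
 *		err = ixgbe_setup_internal_phy_t_x550em(hw);
 */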
2034
2035 /**
2036 * ixgbe_enable_lasi_ext_t_x550em - Enable external Base T PHY interrupts
2037 * @hw: pointer to hardware structure
2038 *
2039 * Enable link status change and temperature failure alarm for the external
2040 * Base T PHY
2041 *
2042 * Returns PHY access status
2043 */
2044 static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw)
2045 {
2046 u32 status;
2047 u16 reg;
2048 bool lsc;
2049
2050 /* Clear interrupt flags */
2051 status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
2052
2053 /* Enable link status change alarm */
2054
2055 /* Enable the LASI interrupts on X552 devices so the driver is notified
2056 * of link configuration changes on the external PHY and can configure
2057 * the internal iXFI link accordingly, since iXFI does not support
2058 * auto-negotiation. This is not required for X553 devices, whose KR
2059 * interface performs auto-negotiation and is used as the internal link
2060 * to the external PHY. Hence the check here to avoid enabling LASI
2061 * interrupts for X553 devices.
2062 */
2063 if (hw->mac.type != ixgbe_mac_X550EM_a) {
2064 status = hw->phy.ops.read_reg(hw,
2065 IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
2066 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
2067
2068 if (status != IXGBE_SUCCESS)
2069 return status;
2070
2071 reg |= IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN;
2072
2073 status = hw->phy.ops.write_reg(hw,
2074 IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
2075 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg);
2076
2077 if (status != IXGBE_SUCCESS)
2078 return status;
2079 }
2080
2081 /* Enable high temperature failure and global fault alarms */
2082 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK,
2083 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2084 &reg);
2085
2086 if (status != IXGBE_SUCCESS)
2087 return status;
2088
2089 reg |= (IXGBE_MDIO_GLOBAL_INT_HI_TEMP_EN |
2090 IXGBE_MDIO_GLOBAL_INT_DEV_FAULT_EN);
2091
2092 status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK,
2093 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2094 reg);
2095
2096 if (status != IXGBE_SUCCESS)
2097 return status;
2098
2099 /* Enable vendor Auto-Neg alarm and Global Interrupt Mask 1 alarm */
2100 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK,
2101 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2102 &reg);
2103
2104 if (status != IXGBE_SUCCESS)
2105 return status;
2106
2107 reg |= (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN |
2108 IXGBE_MDIO_GLOBAL_ALARM_1_INT);
2109
2110 status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK,
2111 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2112 reg);
2113
2114 if (status != IXGBE_SUCCESS)
2115 return status;
2116
2117 /* Enable chip-wide vendor alarm */
2118 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK,
2119 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2120 &reg);
2121
2122 if (status != IXGBE_SUCCESS)
2123 return status;
2124
2125 reg |= IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN;
2126
2127 status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK,
2128 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2129 reg);
2130
2131 return status;
2132 }
2133
2134 /**
2135 * ixgbe_setup_kr_speed_x550em - Configure the KR PHY for link speed.
2136 * @hw: pointer to hardware structure
2137 * @speed: link speed
2138 *
2139 * Configures the integrated KR PHY.
2140 **/
2141 static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
2142 ixgbe_link_speed speed)
2143 {
2144 s32 status;
2145 u32 reg_val;
2146
2147 status = hw->mac.ops.read_iosf_sb_reg(hw,
2148 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
2149 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2150 if (status)
2151 return status;
2152
2153 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
2154 reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR |
2155 IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX);
2156
2157 /* Advertise 10G support. */
2158 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
2159 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR;
2160
2161 /* Advertise 1G support. */
2162 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
2163 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX;
2164
2165 status = hw->mac.ops.write_iosf_sb_reg(hw,
2166 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
2167 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2168
2169 if (hw->mac.type == ixgbe_mac_X550EM_a) {
2170 /* Set lane mode to KR auto negotiation */
2171 status = hw->mac.ops.read_iosf_sb_reg(hw,
2172 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2173 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2174
2175 if (status)
2176 return status;
2177
2178 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
2179 reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN;
2180 reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
2181 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
2182 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
2183
2184 status = hw->mac.ops.write_iosf_sb_reg(hw,
2185 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2186 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2187 }
2188
2189 return ixgbe_restart_an_internal_phy_x550em(hw);
2190 }
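
/* Usage sketch (illustrative only): advertising both KR (10G) and KX (1G)
 * is just an OR of the supported speeds, as the copper/internal-PHY path
 * later in this file does:
 *
 *	status = ixgbe_setup_kr_speed_x550em(hw, IXGBE_LINK_SPEED_10GB_FULL |
 *						 IXGBE_LINK_SPEED_1GB_FULL);
 */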
2191
2192 /**
2193 * ixgbe_reset_phy_fw - Reset firmware-controlled PHYs
2194 * @hw: pointer to hardware structure
2195 */
2196 static s32 ixgbe_reset_phy_fw(struct ixgbe_hw *hw)
2197 {
2198 u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 };
2199 s32 rc;
2200
2201 if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
2202 return IXGBE_SUCCESS;
2203
2204 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_PHY_SW_RESET, &store);
2205 if (rc)
2206 return rc;
2207 memset(store, 0, sizeof(store));
2208
2209 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_INIT_PHY, &store);
2210 if (rc)
2211 return rc;
2212
2213 return ixgbe_setup_fw_link(hw);
2214 }
2215
2216 /**
2217 * ixgbe_check_overtemp_fw - Check firmware-controlled PHYs for overtemp
2218 * @hw: pointer to hardware structure
2219 */
2220 static s32 ixgbe_check_overtemp_fw(struct ixgbe_hw *hw)
2221 {
2222 u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 };
2223 s32 rc;
2224
2225 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &store);
2226 if (rc)
2227 return rc;
2228
2229 if (store[0] & FW_PHY_ACT_GET_LINK_INFO_TEMP) {
2230 ixgbe_shutdown_fw_phy(hw);
2231 return IXGBE_ERR_OVERTEMP;
2232 }
2233 return IXGBE_SUCCESS;
2234 }
2235
2236 /**
2237 * ixgbe_read_mng_if_sel_x550em - Read NW_MNG_IF_SEL register
2238 * @hw: pointer to hardware structure
2239 *
2240 * Read NW_MNG_IF_SEL register and save field values, and check for valid field
2241 * values.
2242 **/
2243 static s32 ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw)
2244 {
2245 /* Save NW management interface connected on board. This is used
2246 * to determine internal PHY mode.
2247 */
2248 hw->phy.nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL);
2249
2250 /* If X552 (X550EM_a) and MDIO is connected to external PHY, then set
2251 * PHY address. This register field has only been used for X552.
2252 */
2253 if (hw->mac.type == ixgbe_mac_X550EM_a &&
2254 hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_MDIO_ACT) {
2255 hw->phy.addr = (hw->phy.nw_mng_if_sel &
2256 IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >>
2257 IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT;
2258 }
2259
2260 return IXGBE_SUCCESS;
2261 }
2262
2263 /**
2264 * ixgbe_init_phy_ops_X550em - PHY/SFP specific init
2265 * @hw: pointer to hardware structure
2266 *
2267 * Initialize any function pointers that were not able to be
2268 * set during init_shared_code because the PHY/SFP type was
2269 * not known. Perform the SFP init if necessary.
2270 */
2271 s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
2272 {
2273 struct ixgbe_phy_info *phy = &hw->phy;
2274 s32 ret_val;
2275
2276 DEBUGFUNC("ixgbe_init_phy_ops_X550em");
2277
2278 hw->mac.ops.set_lan_id(hw);
2279 ixgbe_read_mng_if_sel_x550em(hw);
2280
2281 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) {
2282 phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
2283 ixgbe_setup_mux_ctl(hw);
2284 phy->ops.identify_sfp = ixgbe_identify_sfp_module_X550em;
2285 }
2286
2287 switch (hw->device_id) {
2288 case IXGBE_DEV_ID_X550EM_A_1G_T:
2289 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
2290 phy->ops.read_reg_mdi = NULL;
2291 phy->ops.write_reg_mdi = NULL;
2292 hw->phy.ops.read_reg = NULL;
2293 hw->phy.ops.write_reg = NULL;
2294 phy->ops.check_overtemp = ixgbe_check_overtemp_fw;
2295 if (hw->bus.lan_id)
2296 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
2297 else
2298 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM;
2299
2300 break;
2301 case IXGBE_DEV_ID_X550EM_A_10G_T:
2302 case IXGBE_DEV_ID_X550EM_A_SFP:
2303 hw->phy.ops.read_reg = ixgbe_read_phy_reg_x550a;
2304 hw->phy.ops.write_reg = ixgbe_write_phy_reg_x550a;
2305 if (hw->bus.lan_id)
2306 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
2307 else
2308 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM;
2309 break;
2310 case IXGBE_DEV_ID_X550EM_X_SFP:
2311 /* set up for CS4227 usage */
2312 hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
2313 break;
2314 case IXGBE_DEV_ID_X550EM_X_1G_T:
2315 phy->ops.read_reg_mdi = NULL;
2316 phy->ops.write_reg_mdi = NULL;
2317 default:
2318 break;
2319 }
2320
2321 /* Identify the PHY or SFP module */
2322 ret_val = phy->ops.identify(hw);
2323 if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED ||
2324 ret_val == IXGBE_ERR_PHY_ADDR_INVALID)
2325 return ret_val;
2326
2327 /* Setup function pointers based on detected hardware */
2328 ixgbe_init_mac_link_ops_X550em(hw);
2329 if (phy->sfp_type != ixgbe_sfp_type_unknown)
2330 phy->ops.reset = NULL;
2331
2332 /* Set function pointers based on PHY type */
2333 switch (hw->phy.type) {
2334 case ixgbe_phy_x550em_kx4:
2335 phy->ops.setup_link = NULL;
2336 phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
2337 phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
2338 break;
2339 case ixgbe_phy_x550em_kr:
2340 phy->ops.setup_link = ixgbe_setup_kr_x550em;
2341 phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
2342 phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
2343 break;
2344 case ixgbe_phy_ext_1g_t:
2345 /* link is managed by FW */
2346 phy->ops.setup_link = NULL;
2347 phy->ops.reset = NULL;
2348 break;
2349 case ixgbe_phy_x550em_xfi:
2350 /* link is managed by HW */
2351 phy->ops.setup_link = NULL;
2352 phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
2353 phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
2354 break;
2355 case ixgbe_phy_x550em_ext_t:
2356 /* If internal link mode is XFI, then setup iXFI internal link,
2357 * else setup KR now.
2358 */
2359 phy->ops.setup_internal_link =
2360 ixgbe_setup_internal_phy_t_x550em;
2361
2362 /* setup SW LPLU only for first revision of X550EM_x */
2363 if ((hw->mac.type == ixgbe_mac_X550EM_x) &&
2364 !(IXGBE_FUSES0_REV_MASK &
2365 IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0))))
2366 phy->ops.enter_lplu = ixgbe_enter_lplu_t_x550em;
2367
2368 phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em;
2369 phy->ops.reset = ixgbe_reset_phy_t_X550em;
2370 break;
2371 case ixgbe_phy_sgmii:
2372 phy->ops.setup_link = NULL;
2373 break;
2374 case ixgbe_phy_fw:
2375 phy->ops.setup_link = ixgbe_setup_fw_link;
2376 phy->ops.reset = ixgbe_reset_phy_fw;
2377 break;
2378 default:
2379 break;
2380 }
2381 return ret_val;
2382 }
2383
2384 /**
2385 * ixgbe_set_mdio_speed - Set MDIO clock speed
2386 * @hw: pointer to hardware structure
2387 */
2388 static void ixgbe_set_mdio_speed(struct ixgbe_hw *hw)
2389 {
2390 u32 hlreg0;
2391
2392 switch (hw->device_id) {
2393 case IXGBE_DEV_ID_X550EM_X_10G_T:
2394 case IXGBE_DEV_ID_X550EM_A_SGMII:
2395 case IXGBE_DEV_ID_X550EM_A_SGMII_L:
2396 case IXGBE_DEV_ID_X550EM_A_10G_T:
2397 case IXGBE_DEV_ID_X550EM_A_SFP:
2398 case IXGBE_DEV_ID_X550EM_A_QSFP:
2399 /* Config MDIO clock speed before the first MDIO PHY access */
2400 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2401 hlreg0 &= ~IXGBE_HLREG0_MDCSPD;
2402 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
2403 break;
2404 case IXGBE_DEV_ID_X550EM_A_1G_T:
2405 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
2406 /* Select fast MDIO clock speed for these devices */
2407 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2408 hlreg0 |= IXGBE_HLREG0_MDCSPD;
2409 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
2410 break;
2411 default:
2412 break;
2413 }
2414 }
2415
2416 /**
2417 * ixgbe_reset_hw_X550em - Perform hardware reset
2418 * @hw: pointer to hardware structure
2419 *
2420 * Resets the hardware by resetting the transmit and receive units, masks
2421 * and clears all interrupts, performs a PHY reset, and performs a link (MAC)
2422 * reset.
2423 */
2424 s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
2425 {
2426 ixgbe_link_speed link_speed;
2427 s32 status;
2428 u32 ctrl = 0;
2429 u32 i;
2430 bool link_up = FALSE;
2431 u32 swfw_mask = hw->phy.phy_semaphore_mask;
2432
2433 DEBUGFUNC("ixgbe_reset_hw_X550em");
2434
2435 /* Call adapter stop to disable Tx/Rx and clear interrupts */
2436 status = hw->mac.ops.stop_adapter(hw);
2437 if (status != IXGBE_SUCCESS) {
2438 DEBUGOUT1("Failed to stop adapter, STATUS = %d\n", status);
2439 return status;
2440 }
2441 /* flush pending Tx transactions */
2442 ixgbe_clear_tx_pending(hw);
2443
2444 ixgbe_set_mdio_speed(hw);
2445
2446 /* PHY ops must be identified and initialized prior to reset */
2447 status = hw->phy.ops.init(hw);
2448
2449 if (status)
2450 DEBUGOUT1("Failed to initialize PHY ops, STATUS = %d\n",
2451 status);
2452
2453 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED ||
2454 status == IXGBE_ERR_PHY_ADDR_INVALID) {
2455 DEBUGOUT("Returning from reset HW due to PHY init failure\n");
2456 return status;
2457 }
2458
2459 /* start the external PHY */
2460 if (hw->phy.type == ixgbe_phy_x550em_ext_t) {
2461 status = ixgbe_init_ext_t_x550em(hw);
2462 if (status) {
2463 DEBUGOUT1("Failed to start the external PHY, STATUS = %d\n",
2464 status);
2465 return status;
2466 }
2467 }
2468
2469 /* Setup SFP module if there is one present. */
2470 if (hw->phy.sfp_setup_needed) {
2471 status = hw->mac.ops.setup_sfp(hw);
2472 hw->phy.sfp_setup_needed = FALSE;
2473 }
2474
2475 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
2476 return status;
2477
2478 /* Reset PHY */
2479 if (!hw->phy.reset_disable && hw->phy.ops.reset) {
2480 if (hw->phy.ops.reset(hw) == IXGBE_ERR_OVERTEMP)
2481 return IXGBE_ERR_OVERTEMP;
2482 }
2483
2484 mac_reset_top:
2485 /* Issue global reset to the MAC. Needs to be SW reset if link is up.
2486 * If link reset is used when link is up, it might reset the PHY when
2487 * mng is using it. If link is down or the flag to force full link
2488 * reset is set, then perform link reset.
2489 */
2490 ctrl = IXGBE_CTRL_LNK_RST;
2491 if (!hw->force_full_reset) {
2492 hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
2493 if (link_up)
2494 ctrl = IXGBE_CTRL_RST;
2495 }
2496
2497 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
2498 if (status != IXGBE_SUCCESS) {
2499 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
2500 "semaphore failed with %d", status);
2501 return IXGBE_ERR_SWFW_SYNC;
2502 }
2503 ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
2504 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
2505 IXGBE_WRITE_FLUSH(hw);
2506 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
2507
2508 /* Poll for reset bit to self-clear meaning reset is complete */
2509 for (i = 0; i < 10; i++) {
2510 usec_delay(1);
2511 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
2512 if (!(ctrl & IXGBE_CTRL_RST_MASK))
2513 break;
2514 }
2515
2516 if (ctrl & IXGBE_CTRL_RST_MASK) {
2517 status = IXGBE_ERR_RESET_FAILED;
2518 DEBUGOUT("Reset polling failed to complete.\n");
2519 }
2520
2521 msec_delay(50);
2522
2523 /* Double resets are required for recovery from certain error
2524 * conditions. Between resets, it is necessary to stall to
2525 * allow time for any pending HW events to complete.
2526 */
2527 if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
2528 hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
2529 goto mac_reset_top;
2530 }
2531
2532 /* Store the permanent mac address */
2533 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
2534
2535 /* Store MAC address from RAR0, clear receive address registers, and
2536 * clear the multicast table. Also reset num_rar_entries to 128,
2537 * since we modify this value when programming the SAN MAC address.
2538 */
2539 hw->mac.num_rar_entries = 128;
2540 hw->mac.ops.init_rx_addrs(hw);
2541
2542 ixgbe_set_mdio_speed(hw);
2543
2544 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
2545 ixgbe_setup_mux_ctl(hw);
2546
2547 if (status != IXGBE_SUCCESS)
2548 DEBUGOUT1("Reset HW failed, STATUS = %d\n", status);
2549
2550 return status;
2551 }
2552
2553 /**
2554 * ixgbe_init_ext_t_x550em - Start (unstall) the external Base T PHY.
2555 * @hw: pointer to hardware structure
2556 */
2557 s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
2558 {
2559 u32 status;
2560 u16 reg;
2561
2562 status = hw->phy.ops.read_reg(hw,
2563 IXGBE_MDIO_TX_VENDOR_ALARMS_3,
2564 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
2565 &reg);
2566
2567 if (status != IXGBE_SUCCESS)
2568 return status;
2569
2570 /* If PHY FW reset completed bit is set then this is the first
2571 * SW instance after a power on so the PHY FW must be un-stalled.
2572 */
2573 if (reg & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) {
2574 status = hw->phy.ops.read_reg(hw,
2575 IXGBE_MDIO_GLOBAL_RES_PR_10,
2576 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2577 &reg);
2578
2579 if (status != IXGBE_SUCCESS)
2580 return status;
2581
2582 reg &= ~IXGBE_MDIO_POWER_UP_STALL;
2583
2584 status = hw->phy.ops.write_reg(hw,
2585 IXGBE_MDIO_GLOBAL_RES_PR_10,
2586 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2587 reg);
2588
2589 if (status != IXGBE_SUCCESS)
2590 return status;
2591 }
2592
2593 return status;
2594 }
2595
2596 /**
2597 * ixgbe_setup_kr_x550em - Configure the KR PHY.
2598 * @hw: pointer to hardware structure
2599 **/
2600 s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
2601 {
2602 /* leave link alone for 2.5G */
2603 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL)
2604 return IXGBE_SUCCESS;
2605
2606 if (ixgbe_check_reset_blocked(hw))
2607 return 0;
2608
2609 return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised);
2610 }
2611
2612 /**
2613 * ixgbe_setup_mac_link_sfp_x550em - Set up the internal/external PHY for SFP
2614 * @hw: pointer to hardware structure
2615 * @speed: new link speed
2616 * @autoneg_wait_to_complete: unused
2617 *
2618 * Configure the external PHY and the integrated KR PHY for SFP support.
2619 **/
2620 s32 ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw,
2621 ixgbe_link_speed speed,
2622 bool autoneg_wait_to_complete)
2623 {
2624 s32 ret_val;
2625 u16 reg_slice, reg_val;
2626 bool setup_linear = FALSE;
2627 UNREFERENCED_1PARAMETER(autoneg_wait_to_complete);
2628
2629 /* Check if SFP module is supported and linear */
2630 ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
2631
2632 /* If no SFP module is present, return success: there is no reason to
2633 * configure the CS4227, and a missing SFP is not treated as an error
2634 * in the setup MAC link flow.
2635 */
2636 if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT)
2637 return IXGBE_SUCCESS;
2638
2639 if (ret_val != IXGBE_SUCCESS)
2640 return ret_val;
2641
2642 /* Configure internal PHY for KR/KX. */
2643 ixgbe_setup_kr_speed_x550em(hw, speed);
2644
2645 /* Configure CS4227 LINE side to proper mode. */
2646 reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB +
2647 (hw->bus.lan_id << 12);
2648 if (setup_linear)
2649 reg_val = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
2650 else
2651 reg_val = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
2652 ret_val = hw->link.ops.write_link(hw, hw->link.addr, reg_slice,
2653 reg_val);
2654 return ret_val;
2655 }
2656
2657 /**
2658 * ixgbe_setup_sfi_x550a - Configure the internal PHY for native SFI mode
2659 * @hw: pointer to hardware structure
2660 * @speed: the link speed to force
2661 *
2662 * Configures the integrated PHY for native SFI mode. Used to connect the
2663 * internal PHY directly to an SFP cage, without autonegotiation.
2664 **/
2665 static s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
2666 {
2667 struct ixgbe_mac_info *mac = &hw->mac;
2668 s32 status;
2669 u32 reg_val;
2670
2671 /* Disable all AN and force speed to 10G Serial. */
2672 status = mac->ops.read_iosf_sb_reg(hw,
2673 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2674 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2675 if (status != IXGBE_SUCCESS)
2676 return status;
2677
2678 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
2679 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
2680 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
2681 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
2682
2683 /* Select forced link speed for internal PHY. */
2684 switch (*speed) {
2685 case IXGBE_LINK_SPEED_10GB_FULL:
2686 reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_10G;
2687 break;
2688 case IXGBE_LINK_SPEED_1GB_FULL:
2689 reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G;
2690 break;
2691 default:
2692 /* Other link speeds are not supported by internal PHY. */
2693 return IXGBE_ERR_LINK_SETUP;
2694 }
2695
2696 status = mac->ops.write_iosf_sb_reg(hw,
2697 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2698 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2699
2700 /* Toggle port SW reset by AN reset. */
2701 status = ixgbe_restart_an_internal_phy_x550em(hw);
2702
2703 return status;
2704 }
2705
2706 /**
2707 * ixgbe_setup_mac_link_sfp_x550a - Setup internal PHY for SFP
2708 * @hw: pointer to hardware structure
2709 * @speed: new link speed
2710 * @autoneg_wait_to_complete: unused
2711 *
2712 * Configure the integrated PHY for SFP support.
2713 **/
2714 s32 ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw,
2715 ixgbe_link_speed speed,
2716 bool autoneg_wait_to_complete)
2717 {
2718 s32 ret_val;
2719 u16 reg_phy_ext;
2720 bool setup_linear = FALSE;
2721 u32 reg_slice, reg_phy_int, slice_offset;
2722
2723 UNREFERENCED_1PARAMETER(autoneg_wait_to_complete);
2724
2725 /* Check if SFP module is supported and linear */
2726 ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
2727
2728 /* If no SFP module is present, return success: a missing SFP is not
2729 * treated as an error in the setup MAC link flow.
2730 */
2731 if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT)
2732 return IXGBE_SUCCESS;
2733
2734 if (ret_val != IXGBE_SUCCESS)
2735 return ret_val;
2736
2737 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) {
2738 /* Configure internal PHY for native SFI based on module type */
2739 ret_val = hw->mac.ops.read_iosf_sb_reg(hw,
2740 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2741 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_phy_int);
2742
2743 if (ret_val != IXGBE_SUCCESS)
2744 return ret_val;
2745
2746 reg_phy_int &= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA;
2747 if (!setup_linear)
2748 reg_phy_int |= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR;
2749
2750 ret_val = hw->mac.ops.write_iosf_sb_reg(hw,
2751 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2752 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_phy_int);
2753
2754 if (ret_val != IXGBE_SUCCESS)
2755 return ret_val;
2756
2757 /* Setup SFI internal link. */
2758 ret_val = ixgbe_setup_sfi_x550a(hw, &speed);
2759 } else {
2760 /* Configure internal PHY for KR/KX. */
2761 ixgbe_setup_kr_speed_x550em(hw, speed);
2762
2763 if (hw->phy.addr == 0x0 || hw->phy.addr == 0xFFFF) {
2764 /* Find Address */
2765 DEBUGOUT("Invalid NW_MNG_IF_SEL.MDIO_PHY_ADD value\n");
2766 return IXGBE_ERR_PHY_ADDR_INVALID;
2767 }
2768
2769 /* Get external PHY SKU id */
2770 ret_val = hw->phy.ops.read_reg(hw, IXGBE_CS4227_EFUSE_PDF_SKU,
2771 IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
2772
2773 if (ret_val != IXGBE_SUCCESS)
2774 return ret_val;
2775
2776 /* When configuring quad port CS4223, the MAC instance is part
2777 * of the slice offset.
2778 */
2779 if (reg_phy_ext == IXGBE_CS4223_SKU_ID)
2780 slice_offset = (hw->bus.lan_id +
2781 (hw->bus.instance_id << 1)) << 12;
2782 else
2783 slice_offset = hw->bus.lan_id << 12;
2784
2785 /* Configure CS4227/CS4223 LINE side to proper mode. */
2786 reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + slice_offset;
2787
2788 ret_val = hw->phy.ops.read_reg(hw, reg_slice,
2789 IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
2790
2791 if (ret_val != IXGBE_SUCCESS)
2792 return ret_val;
2793
2794 reg_phy_ext &= ~((IXGBE_CS4227_EDC_MODE_CX1 << 1) |
2795 (IXGBE_CS4227_EDC_MODE_SR << 1));
2796
2797 if (setup_linear)
2798 reg_phy_ext = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
2799 else
2800 reg_phy_ext = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
2801 ret_val = hw->phy.ops.write_reg(hw, reg_slice,
2802 IXGBE_MDIO_ZERO_DEV_TYPE, reg_phy_ext);
2803
2804 /* Flush previous write with a read */
2805 ret_val = hw->phy.ops.read_reg(hw, reg_slice,
2806 IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
2807 }
2808 return ret_val;
2809 }
2810
2811 /**
2812 * ixgbe_setup_ixfi_x550em_x - MAC specific iXFI configuration
2813 * @hw: pointer to hardware structure
2814 *
2815 * iXFI configuration needed for ixgbe_mac_X550EM_x devices.
2816 **/
2817 static s32 ixgbe_setup_ixfi_x550em_x(struct ixgbe_hw *hw)
2818 {
2819 struct ixgbe_mac_info *mac = &hw->mac;
2820 s32 status;
2821 u32 reg_val;
2822
2823 /* Disable training protocol FSM. */
2824 status = mac->ops.read_iosf_sb_reg(hw,
2825 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
2826 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2827 if (status != IXGBE_SUCCESS)
2828 return status;
2829 reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL;
2830 status = mac->ops.write_iosf_sb_reg(hw,
2831 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
2832 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2833 if (status != IXGBE_SUCCESS)
2834 return status;
2835
2836 /* Disable Flex from training TXFFE. */
2837 status = mac->ops.read_iosf_sb_reg(hw,
2838 IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
2839 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2840 if (status != IXGBE_SUCCESS)
2841 return status;
2842 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
2843 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
2844 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
2845 status = mac->ops.write_iosf_sb_reg(hw,
2846 IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
2847 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2848 if (status != IXGBE_SUCCESS)
2849 return status;
2850 status = mac->ops.read_iosf_sb_reg(hw,
2851 IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
2852 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2853 if (status != IXGBE_SUCCESS)
2854 return status;
2855 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
2856 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
2857 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
2858 status = mac->ops.write_iosf_sb_reg(hw,
2859 IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
2860 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2861 if (status != IXGBE_SUCCESS)
2862 return status;
2863
2864 /* Enable override for coefficients. */
2865 status = mac->ops.read_iosf_sb_reg(hw,
2866 IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
2867 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2868 if (status != IXGBE_SUCCESS)
2869 return status;
2870 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN;
2871 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN;
2872 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN;
2873 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN;
2874 status = mac->ops.write_iosf_sb_reg(hw,
2875 IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
2876 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2877 return status;
2878 }
2879
2880 /**
2881 * ixgbe_setup_ixfi_x550em - Configure the KR PHY for iXFI mode.
2882 * @hw: pointer to hardware structure
2883 * @speed: the link speed to force
2884 *
2885 * Configures the integrated KR PHY to use iXFI mode. Used to connect an
2886 * internal and external PHY at a specific speed, without autonegotiation.
2887 **/
2888 static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
2889 {
2890 struct ixgbe_mac_info *mac = &hw->mac;
2891 s32 status;
2892 u32 reg_val;
2893
2894 /* iXFI is only supported with X552 */
2895 if (mac->type != ixgbe_mac_X550EM_x)
2896 return IXGBE_ERR_LINK_SETUP;
2897
2898 /* Disable AN and force speed to 10G Serial. */
2899 status = mac->ops.read_iosf_sb_reg(hw,
2900 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
2901 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2902 if (status != IXGBE_SUCCESS)
2903 return status;
2904
2905 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
2906 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
2907
2908 /* Select forced link speed for internal PHY. */
2909 switch (*speed) {
2910 case IXGBE_LINK_SPEED_10GB_FULL:
2911 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;
2912 break;
2913 case IXGBE_LINK_SPEED_1GB_FULL:
2914 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
2915 break;
2916 default:
2917 /* Other link speeds are not supported by internal KR PHY. */
2918 return IXGBE_ERR_LINK_SETUP;
2919 }
2920
2921 status = mac->ops.write_iosf_sb_reg(hw,
2922 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
2923 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2924 if (status != IXGBE_SUCCESS)
2925 return status;
2926
2927 /* Additional configuration needed for x550em_x */
2928 if (hw->mac.type == ixgbe_mac_X550EM_x) {
2929 status = ixgbe_setup_ixfi_x550em_x(hw);
2930 if (status != IXGBE_SUCCESS)
2931 return status;
2932 }
2933
2934 /* Toggle port SW reset by AN reset. */
2935 status = ixgbe_restart_an_internal_phy_x550em(hw);
2936
2937 return status;
2938 }
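
/* Usage sketch (illustrative only): iXFI is always forced to a single
 * speed, so the caller resolves the speed first and passes it by address,
 * as ixgbe_setup_internal_phy_t_x550em() does below:
 *
 *	ixgbe_link_speed force_speed = IXGBE_LINK_SPEED_10GB_FULL;
 *
 *	status = ixgbe_setup_ixfi_x550em(hw, &force_speed);
 */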
2939
2940 /**
2941 * ixgbe_ext_phy_t_x550em_get_link - Get ext phy link status
2942 * @hw: address of hardware structure
2943 * @link_up: address of boolean to indicate link status
2944 *
2945 * Returns error code if unable to get link status.
2946 */
2947 static s32 ixgbe_ext_phy_t_x550em_get_link(struct ixgbe_hw *hw, bool *link_up)
2948 {
2949 u32 ret;
2950 u16 autoneg_status;
2951
2952 *link_up = FALSE;
2953
2954 /* read this twice back to back to indicate current status */
2955 ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
2956 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
2957 &autoneg_status);
2958 if (ret != IXGBE_SUCCESS)
2959 return ret;
2960
2961 ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
2962 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
2963 &autoneg_status);
2964 if (ret != IXGBE_SUCCESS)
2965 return ret;
2966
2967 *link_up = !!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS);
2968
2969 return IXGBE_SUCCESS;
2970 }
2971
2972 /**
2973 * ixgbe_setup_internal_phy_t_x550em - Configure KR PHY to X557 link
2974 * @hw: pointer to hardware structure
2975 *
2976 * Configures the link between the integrated KR PHY and the external X557 PHY
2977 * The driver will call this function when it gets a link status change
2978 * interrupt from the X557 PHY. This function configures the link speed
2979 * between the PHYs to match the link speed of the BASE-T link.
2980 *
2981 * A return of a non-zero value indicates an error, and the base driver should
2982 * not report link up.
2983 */
2984 s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw)
2985 {
2986 ixgbe_link_speed force_speed;
2987 bool link_up;
2988 u32 status;
2989 u16 speed;
2990
2991 if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
2992 return IXGBE_ERR_CONFIG;
2993
2994 if (hw->mac.type == ixgbe_mac_X550EM_x &&
2995 !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
2996 /* If link is down, there is no setup necessary so return */
2997 status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
2998 if (status != IXGBE_SUCCESS)
2999 return status;
3000
3001 if (!link_up)
3002 return IXGBE_SUCCESS;
3003
3004 status = hw->phy.ops.read_reg(hw,
3005 IXGBE_MDIO_AUTO_NEG_VENDOR_STAT,
3006 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3007 &speed);
3008 if (status != IXGBE_SUCCESS)
3009 return status;
3010
3011 /* If link is still down - no setup is required so return */
3012 status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
3013 if (status != IXGBE_SUCCESS)
3014 return status;
3015 if (!link_up)
3016 return IXGBE_SUCCESS;
3017
3018 /* clear everything but the speed and duplex bits */
3019 speed &= IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK;
3020
3021 switch (speed) {
3022 case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL:
3023 force_speed = IXGBE_LINK_SPEED_10GB_FULL;
3024 break;
3025 case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL:
3026 force_speed = IXGBE_LINK_SPEED_1GB_FULL;
3027 break;
3028 default:
3029 /* Internal PHY does not support anything else */
3030 return IXGBE_ERR_INVALID_LINK_SETTINGS;
3031 }
3032
3033 return ixgbe_setup_ixfi_x550em(hw, &force_speed);
3034 } else {
3035 speed = IXGBE_LINK_SPEED_10GB_FULL |
3036 IXGBE_LINK_SPEED_1GB_FULL;
3037 return ixgbe_setup_kr_speed_x550em(hw, speed);
3038 }
3039 }
3040
3041 /**
3042 * ixgbe_setup_phy_loopback_x550em - Configure the KR PHY for loopback.
3043 * @hw: pointer to hardware structure
3044 *
3045 * Configures the integrated KR PHY to use internal loopback mode.
3046 **/
3047 s32 ixgbe_setup_phy_loopback_x550em(struct ixgbe_hw *hw)
3048 {
3049 s32 status;
3050 u32 reg_val;
3051
3052 /* Disable AN and force speed to 10G Serial. */
3053 status = hw->mac.ops.read_iosf_sb_reg(hw,
3054 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
3055 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3056 if (status != IXGBE_SUCCESS)
3057 return status;
3058 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
3059 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
3060 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;
3061 status = hw->mac.ops.write_iosf_sb_reg(hw,
3062 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
3063 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3064 if (status != IXGBE_SUCCESS)
3065 return status;
3066
3067 /* Set near-end loopback clocks. */
3068 status = hw->mac.ops.read_iosf_sb_reg(hw,
3069 IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id),
3070 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3071 if (status != IXGBE_SUCCESS)
3072 return status;
3073 reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B;
3074 reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS;
3075 status = hw->mac.ops.write_iosf_sb_reg(hw,
3076 IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id),
3077 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3078 if (status != IXGBE_SUCCESS)
3079 return status;
3080
3081 /* Set loopback enable. */
3082 status = hw->mac.ops.read_iosf_sb_reg(hw,
3083 IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id),
3084 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3085 if (status != IXGBE_SUCCESS)
3086 return status;
3087 reg_val |= IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK;
3088 status = hw->mac.ops.write_iosf_sb_reg(hw,
3089 IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id),
3090 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3091 if (status != IXGBE_SUCCESS)
3092 return status;
3093
3094 /* Training bypass. */
3095 status = hw->mac.ops.read_iosf_sb_reg(hw,
3096 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
3097 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3098 if (status != IXGBE_SUCCESS)
3099 return status;
3100 reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS;
3101 status = hw->mac.ops.write_iosf_sb_reg(hw,
3102 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
3103 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3104
3105 return status;
3106 }
3107
3108 /**
3109 * ixgbe_read_ee_hostif_X550 - Read EEPROM word using a host interface command
3110 * (the routine acquires and releases the required SW/FW semaphore itself).
3111 * @hw: pointer to hardware structure
3112 * @offset: offset of word in the EEPROM to read
3113 * @data: word read from the EEPROM
3114 *
3115 * Reads a 16 bit word from the EEPROM using the hostif.
3116 **/
3117 s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
3118 {
3119 const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM;
3120 struct ixgbe_hic_read_shadow_ram buffer;
3121 s32 status;
3122
3123 DEBUGFUNC("ixgbe_read_ee_hostif_X550");
3124 buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
3125 buffer.hdr.req.buf_lenh = 0;
3126 buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
3127 buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
3128
3129 /* convert offset from words to bytes */
3130 buffer.address = IXGBE_CPU_TO_BE32(offset * 2);
3131 /* one word */
3132 buffer.length = IXGBE_CPU_TO_BE16(sizeof(u16));
3133 buffer.pad2 = 0;
3134 buffer.pad3 = 0;
3135
3136 status = hw->mac.ops.acquire_swfw_sync(hw, mask);
3137 if (status)
3138 return status;
3139
3140 status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer),
3141 IXGBE_HI_COMMAND_TIMEOUT);
3142 if (!status) {
3143 *data = (u16)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG,
3144 FW_NVM_DATA_OFFSET);
3145 }
3146
3147 hw->mac.ops.release_swfw_sync(hw, mask);
3148 return status;
3149 }
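
/* Usage sketch (illustrative only): reading a single shadow RAM word, for
 * example the stored checksum, is a one-call operation; the routine takes
 * and releases the SW/FW semaphore internally.
 *
 *	u16 read_checksum;
 *
 *	status = ixgbe_read_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM,
 *					   &read_checksum);
 */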
3150
3151 /**
3152 * ixgbe_read_ee_hostif_buffer_X550 - Read EEPROM word(s) using hostif
3153 * @hw: pointer to hardware structure
3154 * @offset: offset of word in the EEPROM to read
3155 * @words: number of words
3156 * @data: word(s) read from the EEPROM
3157 *
3158 * Reads one or more 16 bit words from the EEPROM using the hostif.
3159 **/
3160 s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
3161 u16 offset, u16 words, u16 *data)
3162 {
3163 const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM;
3164 struct ixgbe_hic_read_shadow_ram buffer;
3165 u32 current_word = 0;
3166 u16 words_to_read;
3167 s32 status;
3168 u32 i;
3169
3170 DEBUGFUNC("ixgbe_read_ee_hostif_buffer_X550");
3171
3172 /* Take semaphore for the entire operation. */
3173 status = hw->mac.ops.acquire_swfw_sync(hw, mask);
3174 if (status) {
3175 DEBUGOUT("EEPROM read buffer - semaphore failed\n");
3176 return status;
3177 }
3178
3179 while (words) {
3180 if (words > FW_MAX_READ_BUFFER_SIZE / 2)
3181 words_to_read = FW_MAX_READ_BUFFER_SIZE / 2;
3182 else
3183 words_to_read = words;
3184
3185 buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
3186 buffer.hdr.req.buf_lenh = 0;
3187 buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
3188 buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
3189
3190 /* convert offset from words to bytes */
3191 buffer.address = IXGBE_CPU_TO_BE32((offset + current_word) * 2);
3192 buffer.length = IXGBE_CPU_TO_BE16(words_to_read * 2);
3193 buffer.pad2 = 0;
3194 buffer.pad3 = 0;
3195
3196 status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer),
3197 IXGBE_HI_COMMAND_TIMEOUT);
3198
3199 if (status) {
3200 DEBUGOUT("Host interface command failed\n");
3201 goto out;
3202 }
3203
3204 for (i = 0; i < words_to_read; i++) {
3205 u32 reg = IXGBE_FLEX_MNG + (FW_NVM_DATA_OFFSET << 2) +
3206 2 * i;
3207 u32 value = IXGBE_READ_REG(hw, reg);
3208
3209 data[current_word] = (u16)(value & 0xffff);
3210 current_word++;
3211 i++;
3212 if (i < words_to_read) {
3213 value >>= 16;
3214 data[current_word] = (u16)(value & 0xffff);
3215 current_word++;
3216 }
3217 }
3218 words -= words_to_read;
3219 }
3220
3221 out:
3222 hw->mac.ops.release_swfw_sync(hw, mask);
3223 return status;
3224 }
3225
3226 /**
3227 * ixgbe_write_ee_hostif_data_X550 - Write EEPROM word using hostif
3228 * @hw: pointer to hardware structure
3229 * @offset: offset of word in the EEPROM to write
3230 * @data: word to write to the EEPROM
3231 *
3232 * Write a 16 bit word to the EEPROM using the hostif.
3233 **/
3234 s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
3235 u16 data)
3236 {
3237 s32 status;
3238 struct ixgbe_hic_write_shadow_ram buffer;
3239
3240 DEBUGFUNC("ixgbe_write_ee_hostif_data_X550");
3241
3242 buffer.hdr.req.cmd = FW_WRITE_SHADOW_RAM_CMD;
3243 buffer.hdr.req.buf_lenh = 0;
3244 buffer.hdr.req.buf_lenl = FW_WRITE_SHADOW_RAM_LEN;
3245 buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
3246
3247 /* one word */
3248 buffer.length = IXGBE_CPU_TO_BE16(sizeof(u16));
3249 buffer.data = data;
3250 buffer.address = IXGBE_CPU_TO_BE32(offset * 2);
3251
3252 status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
3253 sizeof(buffer),
3254 IXGBE_HI_COMMAND_TIMEOUT, FALSE);
3255
3256 return status;
3257 }
3258
3259 /**
3260 * ixgbe_write_ee_hostif_X550 - Write EEPROM word using hostif
3261 * @hw: pointer to hardware structure
3262 * @offset: offset of word in the EEPROM to write
3263 * @data: word to write to the EEPROM
3264 *
3265 * Write a 16 bit word to the EEPROM using the hostif.
3266 **/
3267 s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset,
3268 u16 data)
3269 {
3270 s32 status = IXGBE_SUCCESS;
3271
3272 DEBUGFUNC("ixgbe_write_ee_hostif_X550");
3273
3274 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
3275 IXGBE_SUCCESS) {
3276 status = ixgbe_write_ee_hostif_data_X550(hw, offset, data);
3277 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
3278 } else {
3279 DEBUGOUT("write ee hostif failed to get semaphore");
3280 status = IXGBE_ERR_SWFW_SYNC;
3281 }
3282
3283 return status;
3284 }
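
/* Usage sketch (illustrative only): writing a single word mirrors the read
 * path; the checksum update code at the end of this file uses it like so:
 *
 *	status = ixgbe_write_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM,
 *					    checksum);
 */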
3285
3286 /**
3287 * ixgbe_write_ee_hostif_buffer_X550 - Write EEPROM word(s) using hostif
3288 * @hw: pointer to hardware structure
3289 * @offset: offset of word in the EEPROM to write
3290 * @words: number of words
3291 * @data: word(s) to write to the EEPROM
3292 *
3293 * Writes one or more 16 bit words to the EEPROM using the hostif.
3294 **/
3295 s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
3296 u16 offset, u16 words, u16 *data)
3297 {
3298 s32 status = IXGBE_SUCCESS;
3299 u32 i = 0;
3300
3301 DEBUGFUNC("ixgbe_write_ee_hostif_buffer_X550");
3302
3303 /* Take semaphore for the entire operation. */
3304 status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
3305 if (status != IXGBE_SUCCESS) {
3306 DEBUGOUT("EEPROM write buffer - semaphore failed\n");
3307 goto out;
3308 }
3309
3310 for (i = 0; i < words; i++) {
3311 status = ixgbe_write_ee_hostif_data_X550(hw, offset + i,
3312 data[i]);
3313
3314 if (status != IXGBE_SUCCESS) {
3315 DEBUGOUT("Eeprom buffered write failed\n");
3316 break;
3317 }
3318 }
3319
3320 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
3321 out:
3322
3323 return status;
3324 }
3325
3326 /**
3327 * ixgbe_checksum_ptr_x550 - Checksum one pointer region
3328 * @hw: pointer to hardware structure
3329 * @ptr: pointer offset in eeprom
3330 * @size: size of the section pointed to by ptr; if 0, the first word is used as the size
3331 * @csum: address of checksum to update
3332 * @buffer: pointer to buffer containing calculated checksum
3333 * @buffer_size: size of buffer
3334 *
3335 * Returns error status for any failure
3336 */
3337 static s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr,
3338 u16 size, u16 *csum, u16 *buffer,
3339 u32 buffer_size)
3340 {
3341 u16 buf[256];
3342 s32 status;
3343 u16 length, bufsz, i, start;
3344 u16 *local_buffer;
3345
3346 bufsz = sizeof(buf) / sizeof(buf[0]);
3347
3348 /* Read a chunk at the pointer location */
3349 if (!buffer) {
3350 status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr, bufsz, buf);
3351 if (status) {
3352 DEBUGOUT("Failed to read EEPROM image\n");
3353 return status;
3354 }
3355 local_buffer = buf;
3356 } else {
3357 if (buffer_size < ptr)
3358 return IXGBE_ERR_PARAM;
3359 local_buffer = &buffer[ptr];
3360 }
3361
3362 if (size) {
3363 start = 0;
3364 length = size;
3365 } else {
3366 start = 1;
3367 length = local_buffer[0];
3368
3369 /* Skip pointer section if length is invalid. */
3370 if (length == 0xFFFF || length == 0 ||
3371 (ptr + length) >= hw->eeprom.word_size)
3372 return IXGBE_SUCCESS;
3373 }
3374
3375 if (buffer && ((u32)start + (u32)length > buffer_size))
3376 return IXGBE_ERR_PARAM;
3377
3378 for (i = start; length; i++, length--) {
3379 if (i == bufsz && !buffer) {
3380 ptr += bufsz;
3381 i = 0;
3382 if (length < bufsz)
3383 bufsz = length;
3384
3385 /* Read a chunk at the pointer location */
3386 status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr,
3387 bufsz, buf);
3388 if (status) {
3389 DEBUGOUT("Failed to read EEPROM image\n");
3390 return status;
3391 }
3392 }
3393 *csum += local_buffer[i];
3394 }
3395 return IXGBE_SUCCESS;
3396 }
3397
3398 /**
3399 * ixgbe_calc_checksum_X550 - Calculates and returns the checksum
3400 * @hw: pointer to hardware structure
3401 * @buffer: pointer to buffer containing calculated checksum
3402 * @buffer_size: size of buffer
3403 *
3404 * Returns a negative error code on error, or the 16-bit checksum
3405 **/
3406 s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, u32 buffer_size)
3407 {
3408 u16 eeprom_ptrs[IXGBE_EEPROM_LAST_WORD + 1];
3409 u16 *local_buffer;
3410 s32 status;
3411 u16 checksum = 0;
3412 u16 pointer, i, size;
3413
3414 DEBUGFUNC("ixgbe_calc_eeprom_checksum_X550");
3415
3416 hw->eeprom.ops.init_params(hw);
3417
3418 if (!buffer) {
3419 /* Read pointer area */
3420 status = ixgbe_read_ee_hostif_buffer_X550(hw, 0,
3421 IXGBE_EEPROM_LAST_WORD + 1,
3422 eeprom_ptrs);
3423 if (status) {
3424 DEBUGOUT("Failed to read EEPROM image\n");
3425 return status;
3426 }
3427 local_buffer = eeprom_ptrs;
3428 } else {
3429 if (buffer_size < IXGBE_EEPROM_LAST_WORD)
3430 return IXGBE_ERR_PARAM;
3431 local_buffer = buffer;
3432 }
3433
3434 /*
3435 * For X550 hardware include 0x0-0x41 in the checksum, skip the
3436 * checksum word itself
3437 */
3438 for (i = 0; i <= IXGBE_EEPROM_LAST_WORD; i++)
3439 if (i != IXGBE_EEPROM_CHECKSUM)
3440 checksum += local_buffer[i];
3441
3442 /*
3443 * Include all data from pointers 0x3, 0x6-0xE. This excludes the
3444 * FW, PHY module, and PCIe Expansion/Option ROM pointers.
3445 */
3446 for (i = IXGBE_PCIE_ANALOG_PTR_X550; i < IXGBE_FW_PTR; i++) {
3447 if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR)
3448 continue;
3449
3450 pointer = local_buffer[i];
3451
3452 /* Skip pointer section if the pointer is invalid. */
3453 if (pointer == 0xFFFF || pointer == 0 ||
3454 pointer >= hw->eeprom.word_size)
3455 continue;
3456
3457 switch (i) {
3458 case IXGBE_PCIE_GENERAL_PTR:
3459 size = IXGBE_IXGBE_PCIE_GENERAL_SIZE;
3460 break;
3461 case IXGBE_PCIE_CONFIG0_PTR:
3462 case IXGBE_PCIE_CONFIG1_PTR:
3463 size = IXGBE_PCIE_CONFIG_SIZE;
3464 break;
3465 default:
3466 size = 0;
3467 break;
3468 }
3469
3470 status = ixgbe_checksum_ptr_x550(hw, pointer, size, &checksum,
3471 buffer, buffer_size);
3472 if (status)
3473 return status;
3474 }
3475
3476 checksum = (u16)IXGBE_EEPROM_SUM - checksum;
3477
3478 return (s32)checksum;
3479 }
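
/*
 * Illustrative sketch, not taken from the driver: ixgbe_calc_checksum_X550()
 * returns IXGBE_EEPROM_SUM minus the sum of the covered words, so a correct
 * image satisfies "covered sum + stored checksum == IXGBE_EEPROM_SUM".  The
 * helper name and the caller-supplied image are assumptions; the image must
 * hold at least IXGBE_EEPROM_LAST_WORD + 1 words plus any pointed-to
 * sections the checksum covers.
 */
static s32 example_verify_eeprom_image(struct ixgbe_hw *hw, u16 *image,
				       u32 image_words)
{
	s32 calc;

	/* Calculate over the caller-supplied image instead of the device */
	calc = ixgbe_calc_checksum_X550(hw, image, image_words);
	if (calc < 0)
		return calc;

	/* Compare against the checksum word stored in the image itself */
	if ((u16)calc != image[IXGBE_EEPROM_CHECKSUM])
		return IXGBE_ERR_EEPROM_CHECKSUM;

	return IXGBE_SUCCESS;
}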
3480
3481 /**
3482 * ixgbe_calc_eeprom_checksum_X550 - Calculates and returns the checksum
3483 * @hw: pointer to hardware structure
3484 *
3485 * Returns a negative error code on error, or the 16-bit checksum
3486 **/
3487 s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw)
3488 {
3489 return ixgbe_calc_checksum_X550(hw, NULL, 0);
3490 }
3491
3492 /**
3493 * ixgbe_validate_eeprom_checksum_X550 - Validate EEPROM checksum
3494 * @hw: pointer to hardware structure
3495 * @checksum_val: calculated checksum
3496 *
3497 * Performs checksum calculation and validates the EEPROM checksum. If the
3498 * caller does not need checksum_val, the value can be NULL.
3499 **/
3500 s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, u16 *checksum_val)
3501 {
3502 s32 status;
3503 u16 checksum;
3504 u16 read_checksum = 0;
3505
3506 DEBUGFUNC("ixgbe_validate_eeprom_checksum_X550");
3507
3508 /* Read the first word from the EEPROM. If this times out or fails, do
3509 * not continue or we could be in for a very long wait while every
3510 * EEPROM read fails
3511 */
3512 status = hw->eeprom.ops.read(hw, 0, &checksum);
3513 if (status) {
3514 DEBUGOUT("EEPROM read failed\n");
3515 return status;
3516 }
3517
3518 status = hw->eeprom.ops.calc_checksum(hw);
3519 if (status < 0)
3520 return status;
3521
3522 checksum = (u16)(status & 0xffff);
3523
3524 status = ixgbe_read_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM,
3525 &read_checksum);
3526 if (status)
3527 return status;
3528
3529 /* Verify read checksum from EEPROM is the same as
3530 * calculated checksum
3531 */
3532 if (read_checksum != checksum) {
3533 status = IXGBE_ERR_EEPROM_CHECKSUM;
3534 ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
3535 "Invalid EEPROM checksum");
3536 }
3537
3538 /* If the user cares, return the calculated checksum */
3539 if (checksum_val)
3540 *checksum_val = checksum;
3541
3542 return status;
3543 }
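
/*
 * Minimal usage sketch (assumed init-time context, not part of the driver):
 * validate the NVM checksum once and log the calculated value on mismatch.
 * checksum_val may be passed as NULL when the calculated value is not needed.
 */
static s32 example_probe_time_nvm_check(struct ixgbe_hw *hw)
{
	u16 calculated = 0;
	s32 status;

	status = ixgbe_validate_eeprom_checksum_X550(hw, &calculated);
	if (status == IXGBE_ERR_EEPROM_CHECKSUM)
		DEBUGOUT1("NVM checksum mismatch, calculated 0x%04x\n",
			  calculated);

	return status;
}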
3544
3545 /**
3546 * ixgbe_update_eeprom_checksum_X550 - Updates the EEPROM checksum and flash
3547 * @hw: pointer to hardware structure
3548 *
3549 * After writing EEPROM to shadow RAM using EEWR register, software calculates
3550 * checksum and updates the EEPROM and instructs the hardware to update
3551 * the flash.
3552 **/
3553 s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw)
3554 {
3555 s32 status;
3556 u16 checksum = 0;
3557
3558 DEBUGFUNC("ixgbe_update_eeprom_checksum_X550");
3559
3560 /* Read the first word from the EEPROM. If this times out or fails, do
3561 * not continue or we could be in for a very long wait while every
3562 * EEPROM read fails
3563 */
3564 status = ixgbe_read_ee_hostif_X550(hw, 0, &checksum);
3565 if (status) {
3566 DEBUGOUT("EEPROM read failed\n");
3567 return status;
3568 }
3569
3570 status = ixgbe_calc_eeprom_checksum_X550(hw);
3571 if (status < 0)
3572 return status;
3573
3574 checksum = (u16)(status & 0xffff);
3575
3576 status = ixgbe_write_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM,
3577 checksum);
3578 if (status)
3579 return status;
3580
3581 status = ixgbe_update_flash_X550(hw);
3582
3583 return status;
3584 }
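
/*
 * Minimal sketch of the intended update sequence; the helper, offset, and
 * value are hypothetical.  The word is written to shadow RAM first, then the
 * checksum is recomputed and written, which also triggers the shadow RAM
 * dump to flash via ixgbe_update_flash_X550().
 */
static s32 example_write_nvm_word(struct ixgbe_hw *hw, u16 offset, u16 data)
{
	s32 status;

	status = ixgbe_write_ee_hostif_X550(hw, offset, data);
	if (status)
		return status;

	return ixgbe_update_eeprom_checksum_X550(hw);
}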
3585
3586 /**
3587 * ixgbe_update_flash_X550 - Instruct HW to copy EEPROM to Flash device
3588 * @hw: pointer to hardware structure
3589 *
3590 * Issue a shadow RAM dump to FW to copy EEPROM from shadow RAM to the flash.
3591 **/
3592 s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
3593 {
3594 s32 status = IXGBE_SUCCESS;
3595 union ixgbe_hic_hdr2 buffer;
3596
3597 DEBUGFUNC("ixgbe_update_flash_X550");
3598
3599 buffer.req.cmd = FW_SHADOW_RAM_DUMP_CMD;
3600 buffer.req.buf_lenh = 0;
3601 buffer.req.buf_lenl = FW_SHADOW_RAM_DUMP_LEN;
3602 buffer.req.checksum = FW_DEFAULT_CHECKSUM;
3603
3604 status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
3605 sizeof(buffer),
3606 IXGBE_HI_COMMAND_TIMEOUT, FALSE);
3607
3608 return status;
3609 }
3610
3611 /**
3612 * ixgbe_get_supported_physical_layer_X550em - Returns physical layer type
3613 * @hw: pointer to hardware structure
3614 *
3615 * Determines physical layer capabilities of the current configuration.
3616 **/
3617 u64 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw)
3618 {
3619 u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
3620 u16 ext_ability = 0;
3621
3622 DEBUGFUNC("ixgbe_get_supported_physical_layer_X550em");
3623
3624 hw->phy.ops.identify(hw);
3625
3626 switch (hw->phy.type) {
3627 case ixgbe_phy_x550em_kr:
3628 if (hw->mac.type == ixgbe_mac_X550EM_a) {
3629 if (hw->phy.nw_mng_if_sel &
3630 IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G) {
3631 physical_layer =
3632 IXGBE_PHYSICAL_LAYER_2500BASE_KX;
3633 break;
3634 } else if (hw->device_id ==
3635 IXGBE_DEV_ID_X550EM_A_KR_L) {
3636 physical_layer =
3637 IXGBE_PHYSICAL_LAYER_1000BASE_KX;
3638 break;
3639 }
3640 }
3641 /* fall through */
3642 case ixgbe_phy_x550em_xfi:
3643 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR |
3644 IXGBE_PHYSICAL_LAYER_1000BASE_KX;
3645 break;
3646 case ixgbe_phy_x550em_kx4:
3647 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
3648 IXGBE_PHYSICAL_LAYER_1000BASE_KX;
3649 break;
3650 case ixgbe_phy_x550em_ext_t:
3651 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
3652 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
3653 &ext_ability);
3654 if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
3655 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
3656 if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
3657 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
3658 break;
3659 case ixgbe_phy_fw:
3660 if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_1GB_FULL)
3661 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
3662 if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_100_FULL)
3663 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
3664 if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_10_FULL)
3665 physical_layer |= IXGBE_PHYSICAL_LAYER_10BASE_T;
3666 break;
3667 case ixgbe_phy_sgmii:
3668 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
3669 break;
3670 case ixgbe_phy_ext_1g_t:
3671 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T;
3672 break;
3673 default:
3674 break;
3675 }
3676
3677 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
3678 physical_layer = ixgbe_get_supported_phy_sfp_layer_generic(hw);
3679
3680 return physical_layer;
3681 }
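
/*
 * Illustrative sketch: the value returned above is a bit mask, so callers
 * test individual IXGBE_PHYSICAL_LAYER_* bits rather than comparing for
 * equality.  The helper below is an assumption, not driver code.
 */
static bool example_supports_10gbase_t(struct ixgbe_hw *hw)
{
	u64 layer = ixgbe_get_supported_physical_layer_X550em(hw);

	return (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) != 0;
}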
3682
3683 /**
3684  * ixgbe_get_bus_info_X550em - Set PCI bus info
3685 * @hw: pointer to hardware structure
3686 *
3687 * Sets bus link width and speed to unknown because X550em is
3688 * not a PCI device.
3689 **/
3690 s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw)
3691 {
3692
3693 DEBUGFUNC("ixgbe_get_bus_info_x550em");
3694
3695 hw->bus.width = ixgbe_bus_width_unknown;
3696 hw->bus.speed = ixgbe_bus_speed_unknown;
3697
3698 hw->mac.ops.set_lan_id(hw);
3699
3700 return IXGBE_SUCCESS;
3701 }
3702
3703 /**
3704 * ixgbe_disable_rx_x550 - Disable RX unit
3705 * @hw: pointer to hardware structure
3706 *
3707  * Disables the Rx unit for x550
3708 **/
3709 void ixgbe_disable_rx_x550(struct ixgbe_hw *hw)
3710 {
3711 u32 rxctrl, pfdtxgswc;
3712 s32 status;
3713 struct ixgbe_hic_disable_rxen fw_cmd;
3714
3715 DEBUGFUNC("ixgbe_enable_rx_dma_x550");
3716
3717 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3718 if (rxctrl & IXGBE_RXCTRL_RXEN) {
3719 pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
3720 if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
3721 pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
3722 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
3723 hw->mac.set_lben = TRUE;
3724 } else {
3725 hw->mac.set_lben = FALSE;
3726 }
3727
3728 fw_cmd.hdr.cmd = FW_DISABLE_RXEN_CMD;
3729 fw_cmd.hdr.buf_len = FW_DISABLE_RXEN_LEN;
3730 fw_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
3731 fw_cmd.port_number = (u8)hw->bus.lan_id;
3732
3733 status = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
3734 sizeof(struct ixgbe_hic_disable_rxen),
3735 IXGBE_HI_COMMAND_TIMEOUT, TRUE);
3736
3737 /* If we fail - disable RX using register write */
3738 if (status) {
3739 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3740 if (rxctrl & IXGBE_RXCTRL_RXEN) {
3741 rxctrl &= ~IXGBE_RXCTRL_RXEN;
3742 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
3743 }
3744 }
3745 }
3746 }
3747
3748 /**
3749  * ixgbe_enter_lplu_t_x550em - Transition to low power states
3750 * @hw: pointer to hardware structure
3751 *
3752 * Configures Low Power Link Up on transition to low power states
3753 * (from D0 to non-D0). Link is required to enter LPLU so avoid resetting the
3754 * X557 PHY immediately prior to entering LPLU.
3755 **/
3756 s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw)
3757 {
3758 u16 an_10g_cntl_reg, autoneg_reg, speed;
3759 s32 status;
3760 ixgbe_link_speed lcd_speed;
3761 u32 save_autoneg;
3762 bool link_up;
3763
3764 /* SW LPLU not required on later HW revisions. */
3765 if ((hw->mac.type == ixgbe_mac_X550EM_x) &&
3766 (IXGBE_FUSES0_REV_MASK &
3767 IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0))))
3768 return IXGBE_SUCCESS;
3769
3770 /* If blocked by MNG FW, then don't restart AN */
3771 if (ixgbe_check_reset_blocked(hw))
3772 return IXGBE_SUCCESS;
3773
3774 status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
3775 if (status != IXGBE_SUCCESS)
3776 return status;
3777
3778 status = ixgbe_read_eeprom(hw, NVM_INIT_CTRL_3, &hw->eeprom.ctrl_word_3);
3779
3780 if (status != IXGBE_SUCCESS)
3781 return status;
3782
3783 	/* If link is down, LPLU is disabled in NVM, or both WoL and manageability
3784 	 * are disabled, then force link down by entering low power mode.
3785 */
3786 if (!link_up || !(hw->eeprom.ctrl_word_3 & NVM_INIT_CTRL_3_LPLU) ||
3787 !(hw->wol_enabled || ixgbe_mng_present(hw)))
3788 return ixgbe_set_copper_phy_power(hw, FALSE);
3789
3790 /* Determine LCD */
3791 status = ixgbe_get_lcd_t_x550em(hw, &lcd_speed);
3792
3793 if (status != IXGBE_SUCCESS)
3794 return status;
3795
3796 /* If no valid LCD link speed, then force link down and exit. */
3797 if (lcd_speed == IXGBE_LINK_SPEED_UNKNOWN)
3798 return ixgbe_set_copper_phy_power(hw, FALSE);
3799
3800 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT,
3801 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3802 &speed);
3803
3804 if (status != IXGBE_SUCCESS)
3805 return status;
3806
3807 /* If no link now, speed is invalid so take link down */
3808 status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
3809 if (status != IXGBE_SUCCESS)
3810 return ixgbe_set_copper_phy_power(hw, FALSE);
3811
3812 /* clear everything but the speed bits */
3813 speed &= IXGBE_MDIO_AUTO_NEG_VEN_STAT_SPEED_MASK;
3814
3815 /* If current speed is already LCD, then exit. */
3816 if (((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB) &&
3817 (lcd_speed == IXGBE_LINK_SPEED_1GB_FULL)) ||
3818 ((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB) &&
3819 (lcd_speed == IXGBE_LINK_SPEED_10GB_FULL)))
3820 return status;
3821
3822 /* Clear AN completed indication */
3823 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM,
3824 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3825 &autoneg_reg);
3826
3827 if (status != IXGBE_SUCCESS)
3828 return status;
3829
3830 status = hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
3831 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3832 &an_10g_cntl_reg);
3833
3834 if (status != IXGBE_SUCCESS)
3835 return status;
3836
3837 status = hw->phy.ops.read_reg(hw,
3838 IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
3839 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3840 &autoneg_reg);
3841
3842 if (status != IXGBE_SUCCESS)
3843 return status;
3844
3845 save_autoneg = hw->phy.autoneg_advertised;
3846
3847 /* Setup link at least common link speed */
3848 status = hw->mac.ops.setup_link(hw, lcd_speed, FALSE);
3849
3850 /* restore autoneg from before setting lplu speed */
3851 hw->phy.autoneg_advertised = save_autoneg;
3852
3853 return status;
3854 }
3855
3856 /**
3857  * ixgbe_get_lcd_t_x550em - Determine lowest common denominator
3858 * @hw: pointer to hardware structure
3859 * @lcd_speed: pointer to lowest common link speed
3860 *
3861 * Determine lowest common link speed with link partner.
3862 **/
3863 s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *lcd_speed)
3864 {
3865 u16 an_lp_status;
3866 s32 status;
3867 u16 word = hw->eeprom.ctrl_word_3;
3868
3869 *lcd_speed = IXGBE_LINK_SPEED_UNKNOWN;
3870
3871 status = hw->phy.ops.read_reg(hw, IXGBE_AUTO_NEG_LP_STATUS,
3872 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3873 &an_lp_status);
3874
3875 if (status != IXGBE_SUCCESS)
3876 return status;
3877
3878 /* If link partner advertised 1G, return 1G */
3879 if (an_lp_status & IXGBE_AUTO_NEG_LP_1000BASE_CAP) {
3880 *lcd_speed = IXGBE_LINK_SPEED_1GB_FULL;
3881 return status;
3882 }
3883
3884 /* If 10G disabled for LPLU via NVM D10GMP, then return no valid LCD */
3885 if ((hw->bus.lan_id && (word & NVM_INIT_CTRL_3_D10GMP_PORT1)) ||
3886 (word & NVM_INIT_CTRL_3_D10GMP_PORT0))
3887 return status;
3888
3889 /* Link partner not capable of lower speeds, return 10G */
3890 *lcd_speed = IXGBE_LINK_SPEED_10GB_FULL;
3891 return status;
3892 }
3893
3894 /**
3895 * ixgbe_setup_fc_X550em - Set up flow control
3896 * @hw: pointer to hardware structure
3897 *
3898 * Called at init time to set up flow control.
3899 **/
3900 s32 ixgbe_setup_fc_X550em(struct ixgbe_hw *hw)
3901 {
3902 s32 ret_val = IXGBE_SUCCESS;
3903 u32 pause, asm_dir, reg_val;
3904
3905 DEBUGFUNC("ixgbe_setup_fc_X550em");
3906
3907 /* Validate the requested mode */
3908 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
3909 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
3910 "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
3911 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
3912 goto out;
3913 }
3914
3915 /* 10gig parts do not have a word in the EEPROM to determine the
3916 * default flow control setting, so we explicitly set it to full.
3917 */
3918 if (hw->fc.requested_mode == ixgbe_fc_default)
3919 hw->fc.requested_mode = ixgbe_fc_full;
3920
3921 /* Determine PAUSE and ASM_DIR bits. */
3922 switch (hw->fc.requested_mode) {
3923 case ixgbe_fc_none:
3924 pause = 0;
3925 asm_dir = 0;
3926 break;
3927 case ixgbe_fc_tx_pause:
3928 pause = 0;
3929 asm_dir = 1;
3930 break;
3931 case ixgbe_fc_rx_pause:
3932 /* Rx Flow control is enabled and Tx Flow control is
3933 * disabled by software override. Since there really
3934 * isn't a way to advertise that we are capable of RX
3935 * Pause ONLY, we will advertise that we support both
3936 * symmetric and asymmetric Rx PAUSE, as such we fall
3937 * through to the fc_full statement. Later, we will
3938 * disable the adapter's ability to send PAUSE frames.
3939 */
3940 case ixgbe_fc_full:
3941 pause = 1;
3942 asm_dir = 1;
3943 break;
3944 default:
3945 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
3946 "Flow control param set incorrectly\n");
3947 ret_val = IXGBE_ERR_CONFIG;
3948 goto out;
3949 }
3950
3951 switch (hw->device_id) {
3952 case IXGBE_DEV_ID_X550EM_X_KR:
3953 case IXGBE_DEV_ID_X550EM_A_KR:
3954 case IXGBE_DEV_ID_X550EM_A_KR_L:
3955 ret_val = hw->mac.ops.read_iosf_sb_reg(hw,
3956 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
3957 				IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3958 if (ret_val != IXGBE_SUCCESS)
3959 goto out;
3960 reg_val &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
3961 IXGBE_KRM_AN_CNTL_1_ASM_PAUSE);
3962 if (pause)
3963 reg_val |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE;
3964 if (asm_dir)
3965 reg_val |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
3966 ret_val = hw->mac.ops.write_iosf_sb_reg(hw,
3967 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
3968 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3969
3970 /* This device does not fully support AN. */
3971 hw->fc.disable_fc_autoneg = TRUE;
3972 break;
3973 case IXGBE_DEV_ID_X550EM_X_XFI:
3974 hw->fc.disable_fc_autoneg = TRUE;
3975 break;
3976 default:
3977 break;
3978 }
3979
3980 out:
3981 return ret_val;
3982 }
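
/*
 * Reference sketch of the PAUSE/ASM_DIR advertisement encoding used above
 * (IEEE 802.3 Annex 28B).  The helper is illustrative only and assumes the
 * mode has already been resolved away from ixgbe_fc_default:
 *   none     -> PAUSE=0, ASM_DIR=0
 *   tx_pause -> PAUSE=0, ASM_DIR=1
 *   rx_pause -> advertised the same as full
 *   full     -> PAUSE=1, ASM_DIR=1
 */
static void example_fc_to_pause_bits(enum ixgbe_fc_mode mode,
				     u32 *pause, u32 *asm_dir)
{
	switch (mode) {
	case ixgbe_fc_tx_pause:
		*pause = 0;
		*asm_dir = 1;
		break;
	case ixgbe_fc_rx_pause:
	case ixgbe_fc_full:
		*pause = 1;
		*asm_dir = 1;
		break;
	default:	/* ixgbe_fc_none or unresolved */
		*pause = 0;
		*asm_dir = 0;
		break;
	}
}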
3983
3984 /**
3985 * ixgbe_fc_autoneg_backplane_x550em_a - Enable flow control IEEE clause 37
3986 * @hw: pointer to hardware structure
3987 *
3988 * Enable flow control according to IEEE clause 37.
3989 **/
3990 void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *hw)
3991 {
3992 u32 link_s1, lp_an_page_low, an_cntl_1;
3993 s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
3994 ixgbe_link_speed speed;
3995 bool link_up;
3996
3997 /* AN should have completed when the cable was plugged in.
3998 * Look for reasons to bail out. Bail out if:
3999 * - FC autoneg is disabled, or if
4000 * - link is not up.
4001 */
4002 if (hw->fc.disable_fc_autoneg) {
4003 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
4004 "Flow control autoneg is disabled");
4005 goto out;
4006 }
4007
4008 hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
4009 if (!link_up) {
4010 ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
4011 goto out;
4012 }
4013
4014 	/* Check if auto-negotiation has completed */
4015 status = hw->mac.ops.read_iosf_sb_reg(hw,
4016 IXGBE_KRM_LINK_S1(hw->bus.lan_id),
4017 IXGBE_SB_IOSF_TARGET_KR_PHY, &link_s1);
4018
4019 if (status != IXGBE_SUCCESS ||
4020 (link_s1 & IXGBE_KRM_LINK_S1_MAC_AN_COMPLETE) == 0) {
4021 DEBUGOUT("Auto-Negotiation did not complete\n");
4022 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
4023 goto out;
4024 }
4025
4026 /* Read the 10g AN autoc and LP ability registers and resolve
4027 * local flow control settings accordingly
4028 */
4029 status = hw->mac.ops.read_iosf_sb_reg(hw,
4030 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
4031 IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl_1);
4032
4033 if (status != IXGBE_SUCCESS) {
4034 DEBUGOUT("Auto-Negotiation did not complete\n");
4035 goto out;
4036 }
4037
4038 status = hw->mac.ops.read_iosf_sb_reg(hw,
4039 IXGBE_KRM_LP_BASE_PAGE_HIGH(hw->bus.lan_id),
4040 IXGBE_SB_IOSF_TARGET_KR_PHY, &lp_an_page_low);
4041
4042 if (status != IXGBE_SUCCESS) {
4043 DEBUGOUT("Auto-Negotiation did not complete\n");
4044 goto out;
4045 }
4046
4047 status = ixgbe_negotiate_fc(hw, an_cntl_1, lp_an_page_low,
4048 IXGBE_KRM_AN_CNTL_1_SYM_PAUSE,
4049 IXGBE_KRM_AN_CNTL_1_ASM_PAUSE,
4050 IXGBE_KRM_LP_BASE_PAGE_HIGH_SYM_PAUSE,
4051 IXGBE_KRM_LP_BASE_PAGE_HIGH_ASM_PAUSE);
4052
4053 out:
4054 if (status == IXGBE_SUCCESS) {
4055 hw->fc.fc_was_autonegged = TRUE;
4056 } else {
4057 hw->fc.fc_was_autonegged = FALSE;
4058 hw->fc.current_mode = hw->fc.requested_mode;
4059 }
4060 }
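
/*
 * Reference sketch of the symmetric/asymmetric PAUSE resolution performed by
 * ixgbe_negotiate_fc() on the bits read above (IEEE 802.3 Table 28B-3).
 * Illustrative only; the driver additionally downgrades the full case to
 * rx_pause when the requested mode is not ixgbe_fc_full.
 */
static enum ixgbe_fc_mode example_resolve_fc(bool local_sym, bool local_asm,
					     bool lp_sym, bool lp_asm)
{
	if (local_sym && lp_sym)
		return ixgbe_fc_full;
	if (!local_sym && local_asm && lp_sym && lp_asm)
		return ixgbe_fc_tx_pause;
	if (local_sym && local_asm && !lp_sym && lp_asm)
		return ixgbe_fc_rx_pause;
	return ixgbe_fc_none;
}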
4061
4062 /**
4063 * ixgbe_fc_autoneg_fiber_x550em_a - passthrough FC settings
4064 * @hw: pointer to hardware structure
4065 *
4066 **/
4067 void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *hw)
4068 {
4069 hw->fc.fc_was_autonegged = FALSE;
4070 hw->fc.current_mode = hw->fc.requested_mode;
4071 }
4072
4073 /**
4074 * ixgbe_fc_autoneg_sgmii_x550em_a - Enable flow control IEEE clause 37
4075 * @hw: pointer to hardware structure
4076 *
4077 * Enable flow control according to IEEE clause 37.
4078 **/
4079 void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw)
4080 {
4081 s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
4082 u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
4083 ixgbe_link_speed speed;
4084 bool link_up;
4085
4086 /* AN should have completed when the cable was plugged in.
4087 * Look for reasons to bail out. Bail out if:
4088 * - FC autoneg is disabled, or if
4089 * - link is not up.
4090 */
4091 if (hw->fc.disable_fc_autoneg) {
4092 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
4093 "Flow control autoneg is disabled");
4094 goto out;
4095 }
4096
4097 hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
4098 if (!link_up) {
4099 ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
4100 goto out;
4101 }
4102
4103 /* Check if auto-negotiation has completed */
4104 status = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &info);
4105 if (status != IXGBE_SUCCESS ||
4106 !(info[0] & FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE)) {
4107 DEBUGOUT("Auto-Negotiation did not complete\n");
4108 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
4109 goto out;
4110 }
4111
4112 /* Negotiate the flow control */
4113 status = ixgbe_negotiate_fc(hw, info[0], info[0],
4114 FW_PHY_ACT_GET_LINK_INFO_FC_RX,
4115 FW_PHY_ACT_GET_LINK_INFO_FC_TX,
4116 FW_PHY_ACT_GET_LINK_INFO_LP_FC_RX,
4117 FW_PHY_ACT_GET_LINK_INFO_LP_FC_TX);
4118
4119 out:
4120 if (status == IXGBE_SUCCESS) {
4121 hw->fc.fc_was_autonegged = TRUE;
4122 } else {
4123 hw->fc.fc_was_autonegged = FALSE;
4124 hw->fc.current_mode = hw->fc.requested_mode;
4125 }
4126 }
4127
4128 /**
4129 * ixgbe_setup_fc_backplane_x550em_a - Set up flow control
4130 * @hw: pointer to hardware structure
4131 *
4132 * Called at init time to set up flow control.
4133 **/
4134 s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw)
4135 {
4136 s32 status = IXGBE_SUCCESS;
4137 u32 an_cntl = 0;
4138
4139 DEBUGFUNC("ixgbe_setup_fc_backplane_x550em_a");
4140
4141 /* Validate the requested mode */
4142 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
4143 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
4144 "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
4145 return IXGBE_ERR_INVALID_LINK_SETTINGS;
4146 }
4147
4148 if (hw->fc.requested_mode == ixgbe_fc_default)
4149 hw->fc.requested_mode = ixgbe_fc_full;
4150
4151 /* Set up the 1G and 10G flow control advertisement registers so the
4152 * HW will be able to do FC autoneg once the cable is plugged in. If
4153 * we link at 10G, the 1G advertisement is harmless and vice versa.
4154 */
4155 status = hw->mac.ops.read_iosf_sb_reg(hw,
4156 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
4157 IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl);
4158
4159 if (status != IXGBE_SUCCESS) {
4160 DEBUGOUT("Auto-Negotiation did not complete\n");
4161 return status;
4162 }
4163
4164 /* The possible values of fc.requested_mode are:
4165 * 0: Flow control is completely disabled
4166 * 1: Rx flow control is enabled (we can receive pause frames,
4167 * but not send pause frames).
4168 * 2: Tx flow control is enabled (we can send pause frames but
4169 * we do not support receiving pause frames).
4170 * 3: Both Rx and Tx flow control (symmetric) are enabled.
4171 * other: Invalid.
4172 */
4173 switch (hw->fc.requested_mode) {
4174 case ixgbe_fc_none:
4175 /* Flow control completely disabled by software override. */
4176 an_cntl &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
4177 IXGBE_KRM_AN_CNTL_1_ASM_PAUSE);
4178 break;
4179 case ixgbe_fc_tx_pause:
4180 /* Tx Flow control is enabled, and Rx Flow control is
4181 * disabled by software override.
4182 */
4183 an_cntl |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
4184 an_cntl &= ~IXGBE_KRM_AN_CNTL_1_SYM_PAUSE;
4185 break;
4186 case ixgbe_fc_rx_pause:
4187 /* Rx Flow control is enabled and Tx Flow control is
4188 * disabled by software override. Since there really
4189 * isn't a way to advertise that we are capable of RX
4190 * Pause ONLY, we will advertise that we support both
4191 * symmetric and asymmetric Rx PAUSE, as such we fall
4192 * through to the fc_full statement. Later, we will
4193 * disable the adapter's ability to send PAUSE frames.
4194 */
4195 case ixgbe_fc_full:
4196 /* Flow control (both Rx and Tx) is enabled by SW override. */
4197 an_cntl |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
4198 IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
4199 break;
4200 default:
4201 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
4202 "Flow control param set incorrectly\n");
4203 return IXGBE_ERR_CONFIG;
4204 }
4205
4206 status = hw->mac.ops.write_iosf_sb_reg(hw,
4207 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
4208 IXGBE_SB_IOSF_TARGET_KR_PHY, an_cntl);
4209
4210 /* Restart auto-negotiation. */
4211 status = ixgbe_restart_an_internal_phy_x550em(hw);
4212
4213 return status;
4214 }
4215
4216 /**
4217 * ixgbe_set_mux - Set mux for port 1 access with CS4227
4218 * @hw: pointer to hardware structure
4219 * @state: set mux if 1, clear if 0
4220 */
4221 static void ixgbe_set_mux(struct ixgbe_hw *hw, u8 state)
4222 {
4223 u32 esdp;
4224
4225 if (!hw->bus.lan_id)
4226 return;
4227 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
4228 if (state)
4229 esdp |= IXGBE_ESDP_SDP1;
4230 else
4231 esdp &= ~IXGBE_ESDP_SDP1;
4232 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
4233 IXGBE_WRITE_FLUSH(hw);
4234 }
4235
4236 /**
4237 * ixgbe_acquire_swfw_sync_X550em - Acquire SWFW semaphore
4238 * @hw: pointer to hardware structure
4239 * @mask: Mask to specify which semaphore to acquire
4240 *
4241 * Acquires the SWFW semaphore and sets the I2C MUX
4242 **/
4243 s32 ixgbe_acquire_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
4244 {
4245 s32 status;
4246
4247 DEBUGFUNC("ixgbe_acquire_swfw_sync_X550em");
4248
4249 status = ixgbe_acquire_swfw_sync_X540(hw, mask);
4250 if (status)
4251 return status;
4252
4253 if (mask & IXGBE_GSSR_I2C_MASK)
4254 ixgbe_set_mux(hw, 1);
4255
4256 return IXGBE_SUCCESS;
4257 }
4258
4259 /**
4260 * ixgbe_release_swfw_sync_X550em - Release SWFW semaphore
4261 * @hw: pointer to hardware structure
4262 * @mask: Mask to specify which semaphore to release
4263 *
4264 * Releases the SWFW semaphore and sets the I2C MUX
4265 **/
4266 void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
4267 {
4268 DEBUGFUNC("ixgbe_release_swfw_sync_X550em");
4269
4270 if (mask & IXGBE_GSSR_I2C_MASK)
4271 ixgbe_set_mux(hw, 0);
4272
4273 ixgbe_release_swfw_sync_X540(hw, mask);
4274 }
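
/*
 * Usage sketch (illustrative, not from the driver): every acquire must be
 * paired with a release of the same mask, and acquiring with
 * IXGBE_GSSR_I2C_MASK also routes the CS4227 mux to this port as shown
 * above.  The I2C accessor and its arguments are assumptions here.
 */
static s32 example_read_cs4227_byte(struct ixgbe_hw *hw, u8 byte_offset,
				    u8 dev_addr, u8 *value)
{
	s32 status;

	status = ixgbe_acquire_swfw_sync_X550em(hw, IXGBE_GSSR_I2C_MASK);
	if (status)
		return status;

	status = hw->phy.ops.read_i2c_byte(hw, byte_offset, dev_addr, value);

	ixgbe_release_swfw_sync_X550em(hw, IXGBE_GSSR_I2C_MASK);
	return status;
}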
4275
4276 /**
4277 * ixgbe_acquire_swfw_sync_X550a - Acquire SWFW semaphore
4278 * @hw: pointer to hardware structure
4279 * @mask: Mask to specify which semaphore to acquire
4280 *
4281 * Acquires the SWFW semaphore and get the shared phy token as needed
4282 */
4283 static s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask)
4284 {
4285 u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM;
4286 int retries = FW_PHY_TOKEN_RETRIES;
4287 s32 status = IXGBE_SUCCESS;
4288
4289 DEBUGFUNC("ixgbe_acquire_swfw_sync_X550a");
4290
4291 while (--retries) {
4292 status = IXGBE_SUCCESS;
4293 if (hmask)
4294 status = ixgbe_acquire_swfw_sync_X540(hw, hmask);
4295 if (status) {
4296 DEBUGOUT1("Could not acquire SWFW semaphore, Status = %d\n",
4297 status);
4298 return status;
4299 }
4300 if (!(mask & IXGBE_GSSR_TOKEN_SM))
4301 return IXGBE_SUCCESS;
4302
4303 status = ixgbe_get_phy_token(hw);
4304 if (status == IXGBE_ERR_TOKEN_RETRY)
4305 DEBUGOUT1("Could not acquire PHY token, Status = %d\n",
4306 status);
4307
4308 if (status == IXGBE_SUCCESS)
4309 return IXGBE_SUCCESS;
4310
4311 if (hmask)
4312 ixgbe_release_swfw_sync_X540(hw, hmask);
4313
4314 if (status != IXGBE_ERR_TOKEN_RETRY) {
4315 DEBUGOUT1("Unable to retry acquiring the PHY token, Status = %d\n",
4316 status);
4317 return status;
4318 }
4319 }
4320
4321 DEBUGOUT1("Semaphore acquisition retries failed!: PHY ID = 0x%08X\n",
4322 hw->phy.id);
4323 return status;
4324 }
4325
4326 /**
4327 * ixgbe_release_swfw_sync_X550a - Release SWFW semaphore
4328 * @hw: pointer to hardware structure
4329 * @mask: Mask to specify which semaphore to release
4330 *
4331 * Releases the SWFW semaphore and puts the shared phy token as needed
4332 */
4333 static void ixgbe_release_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask)
4334 {
4335 u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM;
4336
4337 DEBUGFUNC("ixgbe_release_swfw_sync_X550a");
4338
4339 if (mask & IXGBE_GSSR_TOKEN_SM)
4340 ixgbe_put_phy_token(hw);
4341
4342 if (hmask)
4343 ixgbe_release_swfw_sync_X540(hw, hmask);
4344 }
4345
4346 /**
4347 * ixgbe_read_phy_reg_x550a - Reads specified PHY register
4348 * @hw: pointer to hardware structure
4349 * @reg_addr: 32 bit address of PHY register to read
4350 * @device_type: 5 bit device type
4351 * @phy_data: Pointer to read data from PHY register
4352 *
4353 * Reads a value from a specified PHY register using the SWFW lock and PHY
4354  * Token. The PHY Token is needed since the MDIO is shared between two MAC
4355 * instances.
4356 **/
4357 s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
4358 u32 device_type, u16 *phy_data)
4359 {
4360 s32 status;
4361 u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
4362
4363 DEBUGFUNC("ixgbe_read_phy_reg_x550a");
4364
4365 if (hw->mac.ops.acquire_swfw_sync(hw, mask))
4366 return IXGBE_ERR_SWFW_SYNC;
4367
4368 status = hw->phy.ops.read_reg_mdi(hw, reg_addr, device_type, phy_data);
4369
4370 hw->mac.ops.release_swfw_sync(hw, mask);
4371
4372 return status;
4373 }
4374
4375 /**
4376 * ixgbe_write_phy_reg_x550a - Writes specified PHY register
4377 * @hw: pointer to hardware structure
4378 * @reg_addr: 32 bit PHY register to write
4379 * @device_type: 5 bit device type
4380 * @phy_data: Data to write to the PHY register
4381 *
4382 * Writes a value to specified PHY register using the SWFW lock and PHY Token.
4383  * The PHY Token is needed since the MDIO is shared between two MAC instances.
4384 **/
4385 s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
4386 u32 device_type, u16 phy_data)
4387 {
4388 s32 status;
4389 u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
4390
4391 DEBUGFUNC("ixgbe_write_phy_reg_x550a");
4392
4393 if (hw->mac.ops.acquire_swfw_sync(hw, mask) == IXGBE_SUCCESS) {
4394 status = hw->phy.ops.write_reg_mdi(hw, reg_addr, device_type,
4395 phy_data);
4396 hw->mac.ops.release_swfw_sync(hw, mask);
4397 } else {
4398 status = IXGBE_ERR_SWFW_SYNC;
4399 }
4400
4401 return status;
4402 }
4403
4404 /**
4405 * ixgbe_handle_lasi_ext_t_x550em - Handle external Base T PHY interrupt
4406 * @hw: pointer to hardware structure
4407 *
4408 * Handle external Base T PHY interrupt. If high temperature
4409 * failure alarm then return error, else if link status change
4410 * then setup internal/external PHY link
4411 *
4412 * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature
4413 * failure alarm, else return PHY access status.
4414 */
4415 s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw)
4416 {
4417 bool lsc;
4418 u32 status;
4419
4420 status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
4421
4422 if (status != IXGBE_SUCCESS)
4423 return status;
4424
4425 if (lsc)
4426 return ixgbe_setup_internal_phy(hw);
4427
4428 return IXGBE_SUCCESS;
4429 }
4430
4431 /**
4432 * ixgbe_setup_mac_link_t_X550em - Sets the auto advertised link speed
4433 * @hw: pointer to hardware structure
4434 * @speed: new link speed
4435 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
4436 *
4437 * Setup internal/external PHY link speed based on link speed, then set
4438 * external PHY auto advertised link speed.
4439 *
4440 * Returns error status for any failure
4441 **/
4442 s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw,
4443 ixgbe_link_speed speed,
4444 bool autoneg_wait_to_complete)
4445 {
4446 s32 status;
4447 ixgbe_link_speed force_speed;
4448
4449 DEBUGFUNC("ixgbe_setup_mac_link_t_X550em");
4450
4451 	/* Set up the internal/external PHY link speed to iXFI (10G), unless
4452 	 * only 1G is advertised, in which case set up the KX link.
4453 */
4454 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
4455 force_speed = IXGBE_LINK_SPEED_10GB_FULL;
4456 else
4457 force_speed = IXGBE_LINK_SPEED_1GB_FULL;
4458
4459 /* If X552 and internal link mode is XFI, then setup XFI internal link.
4460 */
4461 if (hw->mac.type == ixgbe_mac_X550EM_x &&
4462 !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
4463 status = ixgbe_setup_ixfi_x550em(hw, &force_speed);
4464
4465 if (status != IXGBE_SUCCESS)
4466 return status;
4467 }
4468
4469 return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait_to_complete);
4470 }
4471
4472 /**
4473 * ixgbe_check_link_t_X550em - Determine link and speed status
4474 * @hw: pointer to hardware structure
4475 * @speed: pointer to link speed
4476 * @link_up: TRUE when link is up
4477 * @link_up_wait_to_complete: bool used to wait for link up or not
4478 *
4479 * Check that both the MAC and X557 external PHY have link.
4480 **/
4481 s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
4482 bool *link_up, bool link_up_wait_to_complete)
4483 {
4484 u32 status;
4485 u16 i, autoneg_status = 0;
4486
4487 if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
4488 return IXGBE_ERR_CONFIG;
4489
4490 status = ixgbe_check_mac_link_generic(hw, speed, link_up,
4491 link_up_wait_to_complete);
4492
4493 /* If check link fails or MAC link is not up, then return */
4494 if (status != IXGBE_SUCCESS || !(*link_up))
4495 return status;
4496
4497 	/* MAC link is up, so check the external X557 PHY link.
4498 	 * Its link status is latching low, so it can only be used to detect a
4499 	 * link drop, not the current state of the link, without performing
4500 	 * back-to-back reads.
4501 */
4502 for (i = 0; i < 2; i++) {
4503 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
4504 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
4505 &autoneg_status);
4506
4507 if (status != IXGBE_SUCCESS)
4508 return status;
4509 }
4510
4511 /* If external PHY link is not up, then indicate link not up */
4512 if (!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS))
4513 *link_up = FALSE;
4514
4515 return IXGBE_SUCCESS;
4516 }
4517
4518 /**
4519 * ixgbe_reset_phy_t_X550em - Performs X557 PHY reset and enables LASI
4520 * @hw: pointer to hardware structure
4521 **/
4522 s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw)
4523 {
4524 s32 status;
4525
4526 status = ixgbe_reset_phy_generic(hw);
4527
4528 if (status != IXGBE_SUCCESS)
4529 return status;
4530
4531 /* Configure Link Status Alarm and Temperature Threshold interrupts */
4532 return ixgbe_enable_lasi_ext_t_x550em(hw);
4533 }
4534
4535 /**
4536 * ixgbe_led_on_t_X550em - Turns on the software controllable LEDs.
4537 * @hw: pointer to hardware structure
4538 * @led_idx: led number to turn on
4539 **/
4540 s32 ixgbe_led_on_t_X550em(struct ixgbe_hw *hw, u32 led_idx)
4541 {
4542 u16 phy_data;
4543
4544 DEBUGFUNC("ixgbe_led_on_t_X550em");
4545
4546 if (led_idx >= IXGBE_X557_MAX_LED_INDEX)
4547 return IXGBE_ERR_PARAM;
4548
4549 /* To turn on the LED, set mode to ON. */
4550 ixgbe_read_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
4551 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data);
4552 phy_data |= IXGBE_X557_LED_MANUAL_SET_MASK;
4553 ixgbe_write_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
4554 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data);
4555
4556 /* Some designs have the LEDs wired to the MAC */
4557 return ixgbe_led_on_generic(hw, led_idx);
4558 }
4559
4560 /**
4561 * ixgbe_led_off_t_X550em - Turns off the software controllable LEDs.
4562 * @hw: pointer to hardware structure
4563 * @led_idx: led number to turn off
4564 **/
4565 s32 ixgbe_led_off_t_X550em(struct ixgbe_hw *hw, u32 led_idx)
4566 {
4567 u16 phy_data;
4568
4569 DEBUGFUNC("ixgbe_led_off_t_X550em");
4570
4571 if (led_idx >= IXGBE_X557_MAX_LED_INDEX)
4572 return IXGBE_ERR_PARAM;
4573
4574 	/* To turn off the LED, clear the manual-set mode. */
4575 ixgbe_read_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
4576 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data);
4577 phy_data &= ~IXGBE_X557_LED_MANUAL_SET_MASK;
4578 ixgbe_write_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
4579 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data);
4580
4581 /* Some designs have the LEDs wired to the MAC */
4582 return ixgbe_led_off_generic(hw, led_idx);
4583 }
4584
4585 /**
4586 * ixgbe_set_fw_drv_ver_x550 - Sends driver version to firmware
4587 * @hw: pointer to the HW structure
4588 * @maj: driver version major number
4589 * @min: driver version minor number
4590 * @build: driver version build number
4591 * @sub: driver version sub build number
4592 * @len: length of driver_ver string
4593 * @driver_ver: driver string
4594 *
4595 * Sends driver version number to firmware through the manageability
4596 * block. On success return IXGBE_SUCCESS
4597 * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring
4598 * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
4599 **/
4600 s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
4601 u8 build, u8 sub, u16 len, const char *driver_ver)
4602 {
4603 struct ixgbe_hic_drv_info2 fw_cmd;
4604 s32 ret_val = IXGBE_SUCCESS;
4605 int i;
4606
4607 DEBUGFUNC("ixgbe_set_fw_drv_ver_x550");
4608
4609 if ((len == 0) || (driver_ver == NULL) ||
4610 (len > sizeof(fw_cmd.driver_string)))
4611 return IXGBE_ERR_INVALID_ARGUMENT;
4612
4613 fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
4614 fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN + len;
4615 fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
4616 fw_cmd.port_num = (u8)hw->bus.func;
4617 fw_cmd.ver_maj = maj;
4618 fw_cmd.ver_min = min;
4619 fw_cmd.ver_build = build;
4620 fw_cmd.ver_sub = sub;
4621 fw_cmd.hdr.checksum = 0;
4622 memcpy(fw_cmd.driver_string, driver_ver, len);
4623 fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
4624 (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
4625
4626 for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
4627 ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
4628 sizeof(fw_cmd),
4629 IXGBE_HI_COMMAND_TIMEOUT,
4630 TRUE);
4631 if (ret_val != IXGBE_SUCCESS)
4632 continue;
4633
4634 if (fw_cmd.hdr.cmd_or_resp.ret_status ==
4635 FW_CEM_RESP_STATUS_SUCCESS)
4636 ret_val = IXGBE_SUCCESS;
4637 else
4638 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4639
4640 break;
4641 }
4642
4643 return ret_val;
4644 }
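
/*
 * Minimal usage sketch; the version numbers and driver string below are
 * hypothetical.  The string (including its terminator here) must fit in
 * fw_cmd.driver_string, and len must describe the buffer actually passed.
 */
static s32 example_report_driver_version(struct ixgbe_hw *hw)
{
	static const char ver[] = "ixgbe-example";

	return ixgbe_set_fw_drv_ver_x550(hw, 4, 0, 0, 0,
					 (u16)sizeof(ver), ver);
}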
4645