/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include "hfi.h"
#include "efivar.h"

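/*
 * Fetch the platform configuration table for this device. The table is
 * normally read from the "configuration" UEFI variable; if that read
 * fails, platform_config_load is set so the caller falls back to
 * loading the table through request_firmware().
 */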
void get_platform_config(struct hfi1_devdata *dd)
{
	int ret = 0;
	unsigned long size = 0;
	u8 *temp_platform_config = NULL;

	ret = read_hfi1_efi_var(dd, "configuration", &size,
				(void **)&temp_platform_config);
	if (ret) {
		dd_dev_info(dd,
			    "%s: Failed to get platform config from UEFI, falling back to request firmware\n",
			    __func__);
		/* fall back to request firmware */
		platform_config_load = 1;
		goto bail;
	}

	dd->platform_config.data = temp_platform_config;
	dd->platform_config.size = size;

bail:
	/* exit */;
}

void free_platform_config(struct hfi1_devdata *dd)
{
	if (!platform_config_load) {
		/*
		 * was loaded from EFI, release memory
		 * allocated by read_efi_var
		 */
		kfree(dd->platform_config.data);
	}
	/*
	 * else do nothing, dispose_firmware will release
	 * struct firmware platform_config on driver exit
	 */
}

void get_port_type(struct hfi1_pportdata *ppd)
{
	int ret;

	ret = get_platform_config_field(ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
					PORT_TABLE_PORT_TYPE, &ppd->port_type,
					4);
	if (ret)
		ppd->port_type = PORT_TYPE_UNKNOWN;
}

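/*
 * Enable or disable the QSFP module transmitters by writing the TX
 * disable control byte at QSFP_TX_CTRL_BYTE_OFFS. A nibble of 0x0
 * enables all four lanes and 0xF disables them; this is assumed to be
 * the SFF-8636 TX disable register, the exact offset coming from the
 * QSFP_TX_CTRL_BYTE_OFFS definition.
 */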
int set_qsfp_tx(struct hfi1_pportdata *ppd, int on)
{
	u8 tx_ctrl_byte = on ? 0x0 : 0xF;
	int ret = 0;

	ret = qsfp_write(ppd, ppd->dd->hfi1_id, QSFP_TX_CTRL_BYTE_OFFS,
			 &tx_ctrl_byte, 1);
	/* we expected 1, so consider 0 an error */
	if (ret == 0)
		ret = -EIO;
	else if (ret == 1)
		ret = 0;
	return ret;
}

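/*
 * Qualify the cable's power class against the platform limit. The
 * maximum allowed class comes from the system table in the platform
 * configuration; a cable that needs more power than the platform
 * allows gets the port disabled with a POWER_POLICY linkdown reason.
 */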
static int qual_power(struct hfi1_pportdata *ppd)
{
	u32 cable_power_class = 0, power_class_max = 0;
	u8 *cache = ppd->qsfp_info.cache;
	int ret = 0;

	ret = get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_SYSTEM_TABLE, 0,
		SYSTEM_TABLE_QSFP_POWER_CLASS_MAX, &power_class_max, 4);
	if (ret)
		return ret;

	cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);

	if (cable_power_class > power_class_max)
		ppd->offline_disabled_reason =
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_POWER_POLICY);

	if (ppd->offline_disabled_reason ==
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_POWER_POLICY)) {
		dd_dev_info(
			ppd->dd,
			"%s: Port disabled due to system power restrictions\n",
			__func__);
		ret = -EPERM;
	}
	return ret;
}

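/*
 * Qualify the cable's nominal bit rate against the enabled link speeds.
 * The thresholds assume the SFF-8636 encodings: QSFP_NOM_BIT_RATE_250_OFFS
 * is in units of 250 Mbps (0x64 = 100 -> 25 Gbps) and
 * QSFP_NOM_BIT_RATE_100_OFFS is in units of 100 Mbps (0x7D = 125 ->
 * 12.5 Gbps). A cable slower than an enabled speed disables the port.
 */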
static int qual_bitrate(struct hfi1_pportdata *ppd)
{
	u16 lss = ppd->link_speed_supported, lse = ppd->link_speed_enabled;
	u8 *cache = ppd->qsfp_info.cache;

	if ((lss & OPA_LINK_SPEED_25G) && (lse & OPA_LINK_SPEED_25G) &&
	    cache[QSFP_NOM_BIT_RATE_250_OFFS] < 0x64)
		ppd->offline_disabled_reason =
			   HFI1_ODR_MASK(OPA_LINKDOWN_REASON_LINKSPEED_POLICY);

	if ((lss & OPA_LINK_SPEED_12_5G) && (lse & OPA_LINK_SPEED_12_5G) &&
	    cache[QSFP_NOM_BIT_RATE_100_OFFS] < 0x7D)
		ppd->offline_disabled_reason =
			   HFI1_ODR_MASK(OPA_LINKDOWN_REASON_LINKSPEED_POLICY);

	if (ppd->offline_disabled_reason ==
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_LINKSPEED_POLICY)) {
		dd_dev_info(
			ppd->dd,
			"%s: Cable failed bitrate check, disabling port\n",
			__func__);
		return -EPERM;
	}
	return 0;
}

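/*
 * Take the module out of low power mode when its power class requires
 * it. The power control byte is rewritten in two steps (classes 2-4,
 * then the extra bit for the highest classes, assuming the SFF-8636
 * power control layout), and the SFF-8679 LPMode deassert time is
 * honored before returning.
 */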
static int set_qsfp_high_power(struct hfi1_pportdata *ppd)
{
	u8 cable_power_class = 0, power_ctrl_byte = 0;
	u8 *cache = ppd->qsfp_info.cache;
	int ret;

	cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);

	if (cable_power_class > QSFP_POWER_CLASS_1) {
		power_ctrl_byte = cache[QSFP_PWR_CTRL_BYTE_OFFS];

		power_ctrl_byte |= 1;
		power_ctrl_byte &= ~(0x2);

		ret = qsfp_write(ppd, ppd->dd->hfi1_id,
				 QSFP_PWR_CTRL_BYTE_OFFS,
				 &power_ctrl_byte, 1);
		if (ret != 1)
			return -EIO;

		if (cable_power_class > QSFP_POWER_CLASS_4) {
			power_ctrl_byte |= (1 << 2);
			ret = qsfp_write(ppd, ppd->dd->hfi1_id,
					 QSFP_PWR_CTRL_BYTE_OFFS,
					 &power_ctrl_byte, 1);
			if (ret != 1)
				return -EIO;
		}

		/* SFF 8679 rev 1.7 LPMode Deassert time */
		msleep(300);
	}
	return 0;
}

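/*
 * Program the RX half of the QSFP CDR control byte. The lower nibble
 * is assumed to hold the per-lane RX CDR enables and the upper nibble
 * the TX CDR enables (SFF-8636 CDR control). The platform preset is
 * only consulted for cables above power class 3; lower classes simply
 * get RX CDR turned on.
 */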
static void apply_rx_cdr(struct hfi1_pportdata *ppd,
			 u32 rx_preset_index,
			 u8 *cdr_ctrl_byte)
{
	u32 rx_preset;
	u8 *cache = ppd->qsfp_info.cache;
	int cable_power_class;

	if (!((cache[QSFP_MOD_PWR_OFFS] & 0x4) &&
	      (cache[QSFP_CDR_INFO_OFFS] & 0x40)))
		return;

	/* RX CDR present, bypass supported */
	cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);

	if (cable_power_class <= QSFP_POWER_CLASS_3) {
		/* Power class <= 3, ignore config & turn RX CDR on */
		*cdr_ctrl_byte |= 0xF;
		return;
	}

	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
		rx_preset_index, RX_PRESET_TABLE_QSFP_RX_CDR_APPLY,
		&rx_preset, 4);

	if (!rx_preset) {
		dd_dev_info(
			ppd->dd,
			"%s: RX_CDR_APPLY is set to disabled\n",
			__func__);
		return;
	}
	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
		rx_preset_index, RX_PRESET_TABLE_QSFP_RX_CDR,
		&rx_preset, 4);

	/* Expand cdr setting to all 4 lanes */
	rx_preset = (rx_preset | (rx_preset << 1) |
			(rx_preset << 2) | (rx_preset << 3));

	if (rx_preset) {
		*cdr_ctrl_byte |= rx_preset;
	} else {
		*cdr_ctrl_byte &= rx_preset;
		/* Preserve current TX CDR status */
		*cdr_ctrl_byte |= (cache[QSFP_CDR_CTRL_BYTE_OFFS] & 0xF0);
	}
}

static void apply_tx_cdr(struct hfi1_pportdata *ppd,
			 u32 tx_preset_index,
			 u8 *cdr_ctrl_byte)
{
	u32 tx_preset;
	u8 *cache = ppd->qsfp_info.cache;
	int cable_power_class;

	if (!((cache[QSFP_MOD_PWR_OFFS] & 0x8) &&
	      (cache[QSFP_CDR_INFO_OFFS] & 0x80)))
		return;

	/* TX CDR present, bypass supported */
	cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);

	if (cable_power_class <= QSFP_POWER_CLASS_3) {
		/* Power class <= 3, ignore config & turn TX CDR on */
		*cdr_ctrl_byte |= 0xF0;
		return;
	}

	get_platform_config_field(
		ppd->dd,
		PLATFORM_CONFIG_TX_PRESET_TABLE, tx_preset_index,
		TX_PRESET_TABLE_QSFP_TX_CDR_APPLY, &tx_preset, 4);

	if (!tx_preset) {
		dd_dev_info(
			ppd->dd,
			"%s: TX_CDR_APPLY is set to disabled\n",
			__func__);
		return;
	}
	get_platform_config_field(
		ppd->dd,
		PLATFORM_CONFIG_TX_PRESET_TABLE,
		tx_preset_index,
		TX_PRESET_TABLE_QSFP_TX_CDR, &tx_preset, 4);

	/* Expand cdr setting to all 4 lanes */
	tx_preset = (tx_preset | (tx_preset << 1) |
			(tx_preset << 2) | (tx_preset << 3));

	if (tx_preset)
		*cdr_ctrl_byte |= (tx_preset << 4);
	else
		/* Preserve current/determined RX CDR status */
		*cdr_ctrl_byte &= ((tx_preset << 4) | 0xF);
}

static void apply_cdr_settings(
		struct hfi1_pportdata *ppd, u32 rx_preset_index,
		u32 tx_preset_index)
{
	u8 *cache = ppd->qsfp_info.cache;
	u8 cdr_ctrl_byte = cache[QSFP_CDR_CTRL_BYTE_OFFS];

	apply_rx_cdr(ppd, rx_preset_index, &cdr_ctrl_byte);

	apply_tx_cdr(ppd, tx_preset_index, &cdr_ctrl_byte);

	qsfp_write(ppd, ppd->dd->hfi1_id, QSFP_CDR_CTRL_BYTE_OFFS,
		   &cdr_ctrl_byte, 1);
}

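/*
 * If the module advertises adaptive TX input equalization
 * (QSFP_EQ_INFO_OFFS bit 3), turn the adaptive mode off so that the
 * fixed EQ values programmed by apply_tx_eq_prog() take effect. The
 * lower nibble of upper page 03h byte 241 is assumed to carry the
 * per-lane adaptive-enable bits; it is cleared while the upper nibble
 * is preserved.
 */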
static void apply_tx_eq_auto(struct hfi1_pportdata *ppd)
{
	u8 *cache = ppd->qsfp_info.cache;
	u8 tx_eq;

	if (!(cache[QSFP_EQ_INFO_OFFS] & 0x8))
		return;
	/* Disable adaptive TX EQ if present */
	tx_eq = cache[(128 * 3) + 241];
	tx_eq &= 0xF0;
	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 241, &tx_eq, 1);
}

static void apply_tx_eq_prog(struct hfi1_pportdata *ppd, u32 tx_preset_index)
{
	u8 *cache = ppd->qsfp_info.cache;
	u32 tx_preset;
	u8 tx_eq;

	if (!(cache[QSFP_EQ_INFO_OFFS] & 0x4))
		return;

	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
		tx_preset_index, TX_PRESET_TABLE_QSFP_TX_EQ_APPLY,
		&tx_preset, 4);
	if (!tx_preset) {
		dd_dev_info(
			ppd->dd,
			"%s: TX_EQ_APPLY is set to disabled\n",
			__func__);
		return;
	}
	get_platform_config_field(
			ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
			tx_preset_index, TX_PRESET_TABLE_QSFP_TX_EQ,
			&tx_preset, 4);

	if (((cache[(128 * 3) + 224] & 0xF0) >> 4) < tx_preset) {
		dd_dev_info(
			ppd->dd,
			"%s: TX EQ %x unsupported\n",
			__func__, tx_preset);

		dd_dev_info(
			ppd->dd,
			"%s: Applying EQ %x\n",
			__func__, cache[608] & 0xF0);

		tx_preset = (cache[608] & 0xF0) >> 4;
	}

	tx_eq = tx_preset | (tx_preset << 4);
	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 234, &tx_eq, 1);
	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 235, &tx_eq, 1);
}

static void apply_rx_eq_emp(struct hfi1_pportdata *ppd, u32 rx_preset_index)
{
	u32 rx_preset;
	u8 rx_eq, *cache = ppd->qsfp_info.cache;

	if (!(cache[QSFP_EQ_INFO_OFFS] & 0x2))
		return;
	get_platform_config_field(
			ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
			rx_preset_index, RX_PRESET_TABLE_QSFP_RX_EMP_APPLY,
			&rx_preset, 4);

	if (!rx_preset) {
		dd_dev_info(
			ppd->dd,
			"%s: RX_EMP_APPLY is set to disabled\n",
			__func__);
		return;
	}
	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
		rx_preset_index, RX_PRESET_TABLE_QSFP_RX_EMP,
		&rx_preset, 4);

	if ((cache[(128 * 3) + 224] & 0xF) < rx_preset) {
		dd_dev_info(
			ppd->dd,
			"%s: Requested RX EMP %x\n",
			__func__, rx_preset);

		dd_dev_info(
			ppd->dd,
			"%s: Applying supported EMP %x\n",
			__func__, cache[608] & 0xF);

		rx_preset = cache[608] & 0xF;
	}

	rx_eq = rx_preset | (rx_preset << 4);

	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 236, &rx_eq, 1);
	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 237, &rx_eq, 1);
}

static void apply_eq_settings(struct hfi1_pportdata *ppd,
			      u32 rx_preset_index, u32 tx_preset_index)
{
	u8 *cache = ppd->qsfp_info.cache;

	/* no point going on w/o a page 3 */
	if (cache[2] & 4) {
		dd_dev_info(ppd->dd,
			    "%s: Upper page 03 not present\n",
			    __func__);
		return;
	}

	apply_tx_eq_auto(ppd);

	apply_tx_eq_prog(ppd, tx_preset_index);

	apply_rx_eq_emp(ppd, rx_preset_index);
}

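/*
 * Select an RX output amplitude from the platform preset, constrained
 * to what the module reports as supported (upper page 03h byte 225 is
 * assumed to be the supported-amplitude bitmask). The chosen code is
 * replicated to all four lanes.
 */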
static void apply_rx_amplitude_settings(
		struct hfi1_pportdata *ppd, u32 rx_preset_index,
		u32 tx_preset_index)
{
	u32 rx_preset;
	u8 rx_amp = 0, i = 0, preferred = 0, *cache = ppd->qsfp_info.cache;

	/* no point going on w/o a page 3 */
	if (cache[2] & 4) {
		dd_dev_info(ppd->dd,
			    "%s: Upper page 03 not present\n",
			    __func__);
		return;
	}
	if (!(cache[QSFP_EQ_INFO_OFFS] & 0x1)) {
		dd_dev_info(ppd->dd,
			    "%s: Module does not support RX amplitude control\n",
			    __func__);
		return;
	}

	get_platform_config_field(ppd->dd,
				  PLATFORM_CONFIG_RX_PRESET_TABLE,
				  rx_preset_index,
				  RX_PRESET_TABLE_QSFP_RX_AMP_APPLY,
				  &rx_preset, 4);

	if (!rx_preset) {
		dd_dev_info(ppd->dd,
			    "%s: RX_AMP_APPLY is set to disabled\n",
			    __func__);
		return;
	}
	get_platform_config_field(ppd->dd,
				  PLATFORM_CONFIG_RX_PRESET_TABLE,
				  rx_preset_index,
				  RX_PRESET_TABLE_QSFP_RX_AMP,
				  &rx_preset, 4);

	dd_dev_info(ppd->dd,
		    "%s: Requested RX AMP %x\n",
		    __func__,
		    rx_preset);

	for (i = 0; i < 4; i++) {
		if (cache[(128 * 3) + 225] & (1 << i)) {
			preferred = i;
			if (preferred == rx_preset)
				break;
		}
	}

	/*
	 * Verify that preferred RX amplitude is not just a
	 * fall through of the default
	 */
	if (!preferred && !(cache[(128 * 3) + 225] & 0x1)) {
		dd_dev_info(ppd->dd, "No supported RX AMP, not applying\n");
		return;
	}

	dd_dev_info(ppd->dd,
		    "%s: Applying RX AMP %x\n", __func__, preferred);

	rx_amp = preferred | (preferred << 4);
	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 238, &rx_amp, 1);
	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 239, &rx_amp, 1);
}

#define OPA_INVALID_INDEX 0xFFF

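/*
 * Write the same 8051 configuration value to all four lanes of the
 * given field. Failures are logged per lane but not propagated; the
 * caller-supplied message identifies which setting was being applied.
 */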
static void apply_tx_lanes(struct hfi1_pportdata *ppd, u8 field_id,
			   u32 config_data, const char *message)
{
	u8 i;
	int ret = HCMD_SUCCESS;

	for (i = 0; i < 4; i++) {
		ret = load_8051_config(ppd->dd, field_id, i, config_data);
		if (ret != HCMD_SUCCESS) {
			dd_dev_err(
				ppd->dd,
				"%s: %s for lane %u failed\n",
				__func__, message, i);
		}
	}
}

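/*
 * Hand the tuning results to the 8051 firmware: the external device
 * config enable, the tuning method, the total channel loss (applied to
 * both TX and RX), a summary of the cable's CDR/EQ capabilities, and,
 * for limiting active channels with a valid preset, the TX
 * precursor/attenuation/postcursor values.
 */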
static void apply_tunings(
		struct hfi1_pportdata *ppd, u32 tx_preset_index,
		u8 tuning_method, u32 total_atten, u8 limiting_active)
{
	int ret = 0;
	u32 config_data = 0, tx_preset = 0;
	u8 precur = 0, attn = 0, postcur = 0, external_device_config = 0;
	u8 *cache = ppd->qsfp_info.cache;

	/* Enable external device config if channel is limiting active */
	read_8051_config(ppd->dd, LINK_OPTIMIZATION_SETTINGS,
			 GENERAL_CONFIG, &config_data);
	config_data &= ~(0xff << ENABLE_EXT_DEV_CONFIG_SHIFT);
	config_data |= ((u32)limiting_active << ENABLE_EXT_DEV_CONFIG_SHIFT);
	ret = load_8051_config(ppd->dd, LINK_OPTIMIZATION_SETTINGS,
			       GENERAL_CONFIG, config_data);
	if (ret != HCMD_SUCCESS)
		dd_dev_err(
			ppd->dd,
			"%s: Failed to set external device config enable\n",
			__func__);

	config_data = 0; /* re-init */
	/* Pass tuning method to 8051 */
	read_8051_config(ppd->dd, LINK_TUNING_PARAMETERS, GENERAL_CONFIG,
			 &config_data);
	config_data &= ~(0xff << TUNING_METHOD_SHIFT);
	config_data |= ((u32)tuning_method << TUNING_METHOD_SHIFT);
	ret = load_8051_config(ppd->dd, LINK_TUNING_PARAMETERS, GENERAL_CONFIG,
			       config_data);
	if (ret != HCMD_SUCCESS)
		dd_dev_err(ppd->dd, "%s: Failed to set tuning method\n",
			   __func__);

	/* Set same channel loss for both TX and RX */
	config_data = 0 | (total_atten << 16) | (total_atten << 24);
	apply_tx_lanes(ppd, CHANNEL_LOSS_SETTINGS, config_data,
		       "Setting channel loss");

	/* Inform 8051 of cable capabilities */
	if (ppd->qsfp_info.cache_valid) {
		external_device_config =
			((cache[QSFP_MOD_PWR_OFFS] & 0x4) << 3) |
			((cache[QSFP_MOD_PWR_OFFS] & 0x8) << 2) |
			((cache[QSFP_EQ_INFO_OFFS] & 0x2) << 1) |
			(cache[QSFP_EQ_INFO_OFFS] & 0x4);
		ret = read_8051_config(ppd->dd, DC_HOST_COMM_SETTINGS,
				       GENERAL_CONFIG, &config_data);
		/* Clear, then set the external device config field */
		config_data &= ~(u32)0xFF;
		config_data |= external_device_config;
		ret = load_8051_config(ppd->dd, DC_HOST_COMM_SETTINGS,
				       GENERAL_CONFIG, config_data);
		if (ret != HCMD_SUCCESS)
			dd_dev_info(ppd->dd,
				    "%s: Failed to set ext device config params\n",
				    __func__);
	}

	if (tx_preset_index == OPA_INVALID_INDEX) {
		if (ppd->port_type == PORT_TYPE_QSFP && limiting_active)
			dd_dev_info(ppd->dd, "%s: Invalid Tx preset index\n",
				    __func__);
		return;
	}

	/* Following for limiting active channels only */
	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE, tx_preset_index,
		TX_PRESET_TABLE_PRECUR, &tx_preset, 4);
	precur = tx_preset;

	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
		tx_preset_index, TX_PRESET_TABLE_ATTN, &tx_preset, 4);
	attn = tx_preset;

	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
		tx_preset_index, TX_PRESET_TABLE_POSTCUR, &tx_preset, 4);
	postcur = tx_preset;

	config_data = precur | (attn << 8) | (postcur << 16);

	apply_tx_lanes(ppd, TX_EQ_SETTINGS, config_data,
		       "Applying TX settings");
}

/* Must be holding the QSFP i2c resource */
static int tune_active_qsfp(struct hfi1_pportdata *ppd, u32 *ptr_tx_preset,
			    u32 *ptr_rx_preset, u32 *ptr_total_atten)
{
	int ret;
	u16 lss = ppd->link_speed_supported, lse = ppd->link_speed_enabled;
	u8 *cache = ppd->qsfp_info.cache;

	ppd->qsfp_info.limiting_active = 1;

	ret = set_qsfp_tx(ppd, 0);
	if (ret)
		return ret;

	ret = qual_power(ppd);
	if (ret)
		return ret;

	ret = qual_bitrate(ppd);
	if (ret)
		return ret;

	if (ppd->qsfp_info.reset_needed) {
		reset_qsfp(ppd);
		ppd->qsfp_info.reset_needed = 0;
		refresh_qsfp_cache(ppd, &ppd->qsfp_info);
	} else {
		ppd->qsfp_info.reset_needed = 1;
	}

	ret = set_qsfp_high_power(ppd);
	if (ret)
		return ret;

	if (cache[QSFP_EQ_INFO_OFFS] & 0x4) {
		ret = get_platform_config_field(
			ppd->dd,
			PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_TX_PRESET_IDX_ACTIVE_EQ,
			ptr_tx_preset, 4);
		if (ret) {
			*ptr_tx_preset = OPA_INVALID_INDEX;
			return ret;
		}
	} else {
		ret = get_platform_config_field(
			ppd->dd,
			PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_TX_PRESET_IDX_ACTIVE_NO_EQ,
			ptr_tx_preset, 4);
		if (ret) {
			*ptr_tx_preset = OPA_INVALID_INDEX;
			return ret;
		}
	}

	ret = get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
		PORT_TABLE_RX_PRESET_IDX, ptr_rx_preset, 4);
	if (ret) {
		*ptr_rx_preset = OPA_INVALID_INDEX;
		return ret;
	}

	if ((lss & OPA_LINK_SPEED_25G) && (lse & OPA_LINK_SPEED_25G))
		get_platform_config_field(
			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_LOCAL_ATTEN_25G, ptr_total_atten, 4);
	else if ((lss & OPA_LINK_SPEED_12_5G) && (lse & OPA_LINK_SPEED_12_5G))
		get_platform_config_field(
			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_LOCAL_ATTEN_12G, ptr_total_atten, 4);

	apply_cdr_settings(ppd, *ptr_rx_preset, *ptr_tx_preset);

	apply_eq_settings(ppd, *ptr_rx_preset, *ptr_tx_preset);

	apply_rx_amplitude_settings(ppd, *ptr_rx_preset, *ptr_tx_preset);

	ret = set_qsfp_tx(ppd, 1);

	return ret;
}

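/*
 * Dispatch on the transmitter technology nibble of the QSFP memory
 * (assumed to be the SFF-8636 device technology byte, bits 7-4):
 * copper technologies (0xA-0xB) are tuned passively from the
 * attenuation values, most optical/active technologies go through
 * tune_active_qsfp(), and the remaining codes are reported as
 * unknown/unsupported.
 */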
static int tune_qsfp(struct hfi1_pportdata *ppd,
		     u32 *ptr_tx_preset, u32 *ptr_rx_preset,
		     u8 *ptr_tuning_method, u32 *ptr_total_atten)
{
	u32 cable_atten = 0, remote_atten = 0, platform_atten = 0;
	u16 lss = ppd->link_speed_supported, lse = ppd->link_speed_enabled;
	int ret = 0;
	u8 *cache = ppd->qsfp_info.cache;

	switch ((cache[QSFP_MOD_TECH_OFFS] & 0xF0) >> 4) {
	case 0xA ... 0xB:
		ret = get_platform_config_field(
			ppd->dd,
			PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_LOCAL_ATTEN_25G,
			&platform_atten, 4);
		if (ret)
			return ret;

		if ((lss & OPA_LINK_SPEED_25G) && (lse & OPA_LINK_SPEED_25G))
			cable_atten = cache[QSFP_CU_ATTEN_12G_OFFS];
		else if ((lss & OPA_LINK_SPEED_12_5G) &&
			 (lse & OPA_LINK_SPEED_12_5G))
			cable_atten = cache[QSFP_CU_ATTEN_7G_OFFS];

		/* Fallback to configured attenuation if cable memory is bad */
		if (cable_atten == 0 || cable_atten > 36) {
			ret = get_platform_config_field(
				ppd->dd,
				PLATFORM_CONFIG_SYSTEM_TABLE, 0,
				SYSTEM_TABLE_QSFP_ATTENUATION_DEFAULT_25G,
				&cable_atten, 4);
			if (ret)
				return ret;
		}

		ret = get_platform_config_field(
			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_REMOTE_ATTEN_25G, &remote_atten, 4);
		if (ret)
			return ret;

		*ptr_total_atten = platform_atten + cable_atten + remote_atten;

		*ptr_tuning_method = OPA_PASSIVE_TUNING;
		break;
	case 0x0 ... 0x9: /* fallthrough */
	case 0xC: /* fallthrough */
	case 0xE:
		ret = tune_active_qsfp(ppd, ptr_tx_preset, ptr_rx_preset,
				       ptr_total_atten);
		if (ret)
			return ret;

		*ptr_tuning_method = OPA_ACTIVE_TUNING;
		break;
	case 0xD: /* fallthrough */
	case 0xF:
	default:
		dd_dev_info(ppd->dd, "%s: Unknown/unsupported cable\n",
			    __func__);
		break;
	}
	return ret;
}

/*
 * This function communicates its success or failure via
 * ppd->driver_link_ready. Thus, it depends on its association with
 * start_link(...), which checks driver_link_ready before proceeding
 * with the link negotiation and initialization process.
 */
void tune_serdes(struct hfi1_pportdata *ppd)
{
	int ret = 0;
	u32 total_atten = 0;
	u32 remote_atten = 0, platform_atten = 0;
	u32 rx_preset_index, tx_preset_index;
	u8 tuning_method = 0, limiting_active = 0;
	struct hfi1_devdata *dd = ppd->dd;

	rx_preset_index = OPA_INVALID_INDEX;
	tx_preset_index = OPA_INVALID_INDEX;

	/* the link defaults to enabled */
	ppd->link_enabled = 1;
	/* the driver link ready state defaults to not ready */
	ppd->driver_link_ready = 0;
	ppd->offline_disabled_reason = HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);

	/* Skip the tuning for testing (loopback != none) and simulations */
	if (loopback != LOOPBACK_NONE ||
	    ppd->dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
		ppd->driver_link_ready = 1;
		return;
	}

	switch (ppd->port_type) {
	case PORT_TYPE_DISCONNECTED:
		ppd->offline_disabled_reason =
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_DISCONNECTED);
		dd_dev_info(dd, "%s: Port disconnected, disabling port\n",
			    __func__);
		goto bail;
	case PORT_TYPE_FIXED:
		/* platform_atten, remote_atten pre-zeroed to catch error */
		get_platform_config_field(
			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_LOCAL_ATTEN_25G, &platform_atten, 4);

		get_platform_config_field(
			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_REMOTE_ATTEN_25G, &remote_atten, 4);

		total_atten = platform_atten + remote_atten;

		tuning_method = OPA_PASSIVE_TUNING;
		break;
	case PORT_TYPE_VARIABLE:
		if (qsfp_mod_present(ppd)) {
			/*
			 * platform_atten, remote_atten pre-zeroed to
			 * catch error
			 */
			get_platform_config_field(
				ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
				PORT_TABLE_LOCAL_ATTEN_25G,
				&platform_atten, 4);

			get_platform_config_field(
				ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
				PORT_TABLE_REMOTE_ATTEN_25G,
				&remote_atten, 4);

			total_atten = platform_atten + remote_atten;

			tuning_method = OPA_PASSIVE_TUNING;
		} else {
			ppd->offline_disabled_reason =
			     HFI1_ODR_MASK(OPA_LINKDOWN_REASON_CHASSIS_CONFIG);
			goto bail;
		}
		break;
	case PORT_TYPE_QSFP:
		if (qsfp_mod_present(ppd)) {
			ret = acquire_chip_resource(ppd->dd,
						    qsfp_resource(ppd->dd),
						    QSFP_WAIT);
			if (ret) {
				dd_dev_err(ppd->dd, "%s: hfi%d: cannot lock i2c chain\n",
					   __func__, (int)ppd->dd->hfi1_id);
				goto bail;
			}
			refresh_qsfp_cache(ppd, &ppd->qsfp_info);

			if (ppd->qsfp_info.cache_valid) {
				ret = tune_qsfp(ppd,
						&tx_preset_index,
						&rx_preset_index,
						&tuning_method,
						&total_atten);

				/*
				 * We may have modified the QSFP memory, so
				 * update the cache to reflect the changes
				 */
				refresh_qsfp_cache(ppd, &ppd->qsfp_info);
				limiting_active =
						ppd->qsfp_info.limiting_active;
			} else {
				dd_dev_err(dd,
					   "%s: Reading QSFP memory failed\n",
					   __func__);
				ret = -EINVAL; /* a fail indication */
			}
			release_chip_resource(ppd->dd, qsfp_resource(ppd->dd));
			if (ret)
				goto bail;
		} else {
			ppd->offline_disabled_reason =
			   HFI1_ODR_MASK(
				OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
			goto bail;
		}
		break;
	default:
		dd_dev_info(ppd->dd, "%s: Unknown port type\n", __func__);
		ppd->port_type = PORT_TYPE_UNKNOWN;
		tuning_method = OPA_UNKNOWN_TUNING;
		total_atten = 0;
		limiting_active = 0;
		tx_preset_index = OPA_INVALID_INDEX;
		break;
	}

	if (ppd->offline_disabled_reason ==
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
		apply_tunings(ppd, tx_preset_index, tuning_method,
			      total_atten, limiting_active);

	if (!ret)
		ppd->driver_link_ready = 1;

	return;
bail:
	ppd->driver_link_ready = 0;
}
910