xref: /linux/drivers/infiniband/hw/hfi1/platform.c (revision e5c86679d5e864947a52fb31e45a425dea3e7fa9)
1 /*
2  * Copyright(c) 2015, 2016 Intel Corporation.
3  *
4  * This file is provided under a dual BSD/GPLv2 license.  When using or
5  * redistributing this file, you may do so under either license.
6  *
7  * GPL LICENSE SUMMARY
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of version 2 of the GNU General Public License as
11  * published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * General Public License for more details.
17  *
18  * BSD LICENSE
19  *
20  * Redistribution and use in source and binary forms, with or without
21  * modification, are permitted provided that the following conditions
22  * are met:
23  *
24  *  - Redistributions of source code must retain the above copyright
25  *    notice, this list of conditions and the following disclaimer.
26  *  - Redistributions in binary form must reproduce the above copyright
27  *    notice, this list of conditions and the following disclaimer in
28  *    the documentation and/or other materials provided with the
29  *    distribution.
30  *  - Neither the name of Intel Corporation nor the names of its
31  *    contributors may be used to endorse or promote products derived
32  *    from this software without specific prior written permission.
33  *
34  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
45  *
46  */
47 
48 #include "hfi.h"
49 #include "efivar.h"
50 #include "eprom.h"
51 
52 static int validate_scratch_checksum(struct hfi1_devdata *dd)
53 {
54 	u64 checksum = 0, temp_scratch = 0;
55 	int i, j, version;
56 
57 	temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH);
58 	version = (temp_scratch & BITMAP_VERSION_SMASK) >> BITMAP_VERSION_SHIFT;
59 
60 	/* Prevent the power-on default of all zeroes from passing checksum */
61 	if (!version)
62 		return 0;
63 
64 	/*
65 	 * ASIC scratch 0 only contains the checksum and bitmap version as
66 	 * fields of interest, both of which are handled separately from the
67 	 * loop below, so skip it
68 	 */
69 	checksum += version;
70 	for (i = 1; i < ASIC_NUM_SCRATCH; i++) {
71 		temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH + (8 * i));
72 		for (j = sizeof(u64); j != 0; j -= 2) {
73 			checksum += (temp_scratch & 0xFFFF);
74 			temp_scratch >>= 16;
75 		}
76 	}
77 
78 	while (checksum >> 16)
79 		checksum = (checksum & CHECKSUM_MASK) + (checksum >> 16);
80 
81 	temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH);
82 	temp_scratch &= CHECKSUM_SMASK;
83 	temp_scratch >>= CHECKSUM_SHIFT;
84 
85 	if (checksum + temp_scratch == 0xFFFF)
86 		return 1;
87 	return 0;
88 }
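
/*
 * Illustration of the fold above (descriptive only, not driver logic):
 * the 64-bit running sum is reduced to 16 bits with end-around carry,
 * as in the Internet checksum.  For example, a running sum of 0x2FFFD
 * folds to 0xFFFD + 0x2 = 0xFFFF.  The stored checksum field is the
 * 16-bit complement of the folded sum (which excludes the field itself),
 * so folded + stored == 0xFFFF on a valid bitmap.
 */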
89 
90 static void save_platform_config_fields(struct hfi1_devdata *dd)
91 {
92 	struct hfi1_pportdata *ppd = dd->pport;
93 	u64 temp_scratch = 0, temp_dest = 0;
94 
95 	temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH_1);
96 
97 	temp_dest = temp_scratch &
98 		    (dd->hfi1_id ? PORT1_PORT_TYPE_SMASK :
99 		     PORT0_PORT_TYPE_SMASK);
100 	ppd->port_type = temp_dest >>
101 			 (dd->hfi1_id ? PORT1_PORT_TYPE_SHIFT :
102 			  PORT0_PORT_TYPE_SHIFT);
103 
104 	temp_dest = temp_scratch &
105 		    (dd->hfi1_id ? PORT1_LOCAL_ATTEN_SMASK :
106 		     PORT0_LOCAL_ATTEN_SMASK);
107 	ppd->local_atten = temp_dest >>
108 			   (dd->hfi1_id ? PORT1_LOCAL_ATTEN_SHIFT :
109 			    PORT0_LOCAL_ATTEN_SHIFT);
110 
111 	temp_dest = temp_scratch &
112 		    (dd->hfi1_id ? PORT1_REMOTE_ATTEN_SMASK :
113 		     PORT0_REMOTE_ATTEN_SMASK);
114 	ppd->remote_atten = temp_dest >>
115 			    (dd->hfi1_id ? PORT1_REMOTE_ATTEN_SHIFT :
116 			     PORT0_REMOTE_ATTEN_SHIFT);
117 
118 	temp_dest = temp_scratch &
119 		    (dd->hfi1_id ? PORT1_DEFAULT_ATTEN_SMASK :
120 		     PORT0_DEFAULT_ATTEN_SMASK);
121 	ppd->default_atten = temp_dest >>
122 			     (dd->hfi1_id ? PORT1_DEFAULT_ATTEN_SHIFT :
123 			      PORT0_DEFAULT_ATTEN_SHIFT);
124 
125 	temp_scratch = read_csr(dd, dd->hfi1_id ? ASIC_CFG_SCRATCH_3 :
126 				ASIC_CFG_SCRATCH_2);
127 
128 	ppd->tx_preset_eq = (temp_scratch & TX_EQ_SMASK) >> TX_EQ_SHIFT;
129 	ppd->tx_preset_noeq = (temp_scratch & TX_NO_EQ_SMASK) >> TX_NO_EQ_SHIFT;
130 	ppd->rx_preset = (temp_scratch & RX_SMASK) >> RX_SHIFT;
131 
132 	ppd->max_power_class = (temp_scratch & QSFP_MAX_POWER_SMASK) >>
133 				QSFP_MAX_POWER_SHIFT;
134 }
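
/*
 * Sketch of the extraction idiom used above (field names illustrative):
 *
 *     field = (scratch & PORTn_<FIELD>_SMASK) >> PORTn_<FIELD>_SHIFT;
 *
 * SCRATCH_1 holds the port type and attenuation fields for both ports;
 * SCRATCH_2 (port 0) and SCRATCH_3 (port 1) hold the TX/RX preset
 * indices and the QSFP max power class.
 */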
135 
136 void get_platform_config(struct hfi1_devdata *dd)
137 {
138 	int ret = 0;
139 	unsigned long size = 0;
140 	u8 *temp_platform_config = NULL;
141 	u32 esize;
142 
143 	if (is_integrated(dd)) {
144 		if (validate_scratch_checksum(dd)) {
145 			save_platform_config_fields(dd);
146 			return;
147 		}
148 		dd_dev_err(dd, "%s: Config bitmap corrupted/uninitialized\n",
149 			   __func__);
150 		dd_dev_err(dd,
151 			   "%s: Please update your BIOS to support active channels\n",
152 			   __func__);
153 	} else {
154 		ret = eprom_read_platform_config(dd,
155 						 (void **)&temp_platform_config,
156 						 &esize);
157 		if (!ret) {
158 			/* success */
159 			dd->platform_config.data = temp_platform_config;
160 			dd->platform_config.size = esize;
161 			return;
162 		}
163 		/* fail, try EFI variable */
164 
165 		ret = read_hfi1_efi_var(dd, "configuration", &size,
166 					(void **)&temp_platform_config);
167 		if (!ret) {
168 			dd->platform_config.data = temp_platform_config;
169 			dd->platform_config.size = size;
170 			return;
171 		}
172 	}
173 	dd_dev_err(dd,
174 		   "%s: Failed to get platform config, falling back to sub-optimal default file\n",
175 		   __func__);
176 	/* fall back to request firmware */
177 	platform_config_load = 1;
178 }
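
/*
 * Source priority implemented above (summary only):
 *
 *     integrated HFI: ASIC scratch bitmap, if its checksum validates
 *     discrete HFI:   EPROM partition, then the EFI "configuration"
 *                     variable
 *     otherwise:      fall back to request_firmware() of the default
 *                     platform file, signalled via platform_config_load
 */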
179 
180 void free_platform_config(struct hfi1_devdata *dd)
181 {
182 	if (!platform_config_load) {
183 		/*
184 		 * was loaded from EFI or the EPROM, release memory
185 		 * allocated by read_efi_var/eprom_read_platform_config
186 		 */
187 		kfree(dd->platform_config.data);
188 	}
189 	/*
190 	 * else do nothing, dispose_firmware will release
191 	 * struct firmware platform_config on driver exit
192 	 */
193 }
194 
195 void get_port_type(struct hfi1_pportdata *ppd)
196 {
197 	int ret;
198 	u32 temp;
199 
200 	ret = get_platform_config_field(ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
201 					PORT_TABLE_PORT_TYPE, &temp,
202 					4);
203 	if (ret) {
204 		ppd->port_type = PORT_TYPE_UNKNOWN;
205 		return;
206 	}
207 	ppd->port_type = temp;
208 }
209 
210 int set_qsfp_tx(struct hfi1_pportdata *ppd, int on)
211 {
212 	u8 tx_ctrl_byte = on ? 0x0 : 0xF;
213 	int ret = 0;
214 
215 	ret = qsfp_write(ppd, ppd->dd->hfi1_id, QSFP_TX_CTRL_BYTE_OFFS,
216 			 &tx_ctrl_byte, 1);
217 	/* we expected 1, so consider 0 an error */
218 	if (ret == 0)
219 		ret = -EIO;
220 	else if (ret == 1)
221 		ret = 0;
222 	return ret;
223 }
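
/*
 * The TX control byte carries one TX_DISABLE bit per lane in its low
 * nibble, so 0xF disables all four lanes and 0x0 re-enables them.
 * Typical pairing, as used by tune_active_qsfp() below:
 *
 *     set_qsfp_tx(ppd, 0);    // disable TX while (re)tuning
 *     ...apply settings...
 *     set_qsfp_tx(ppd, 1);    // restore TX
 */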
224 
225 static int qual_power(struct hfi1_pportdata *ppd)
226 {
227 	u32 cable_power_class = 0, power_class_max = 0;
228 	u8 *cache = ppd->qsfp_info.cache;
229 	int ret = 0;
230 
231 	ret = get_platform_config_field(
232 		ppd->dd, PLATFORM_CONFIG_SYSTEM_TABLE, 0,
233 		SYSTEM_TABLE_QSFP_POWER_CLASS_MAX, &power_class_max, 4);
234 	if (ret)
235 		return ret;
236 
237 	cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);
238 
239 	if (cable_power_class > power_class_max)
240 		ppd->offline_disabled_reason =
241 			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_POWER_POLICY);
242 
243 	if (ppd->offline_disabled_reason ==
244 			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_POWER_POLICY)) {
245 		dd_dev_info(
246 			ppd->dd,
247 			"%s: Port disabled due to system power restrictions\n",
248 			__func__);
249 		ret = -EPERM;
250 	}
251 	return ret;
252 }
253 
254 static int qual_bitrate(struct hfi1_pportdata *ppd)
255 {
256 	u16 lss = ppd->link_speed_supported, lse = ppd->link_speed_enabled;
257 	u8 *cache = ppd->qsfp_info.cache;
258 
259 	if ((lss & OPA_LINK_SPEED_25G) && (lse & OPA_LINK_SPEED_25G) &&
260 	    cache[QSFP_NOM_BIT_RATE_250_OFFS] < 0x64)
261 		ppd->offline_disabled_reason =
262 			   HFI1_ODR_MASK(OPA_LINKDOWN_REASON_LINKSPEED_POLICY);
263 
264 	if ((lss & OPA_LINK_SPEED_12_5G) && (lse & OPA_LINK_SPEED_12_5G) &&
265 	    cache[QSFP_NOM_BIT_RATE_100_OFFS] < 0x7D)
266 		ppd->offline_disabled_reason =
267 			   HFI1_ODR_MASK(OPA_LINKDOWN_REASON_LINKSPEED_POLICY);
268 
269 	if (ppd->offline_disabled_reason ==
270 			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_LINKSPEED_POLICY)) {
271 		dd_dev_info(
272 			ppd->dd,
273 			"%s: Cable failed bitrate check, disabling port\n",
274 			__func__);
275 		return -EPERM;
276 	}
277 	return 0;
278 }
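
/*
 * Decode of the thresholds above (the nominal bit rate fields are in
 * units of 250 Mbps and 100 Mbps respectively, per the field names):
 *
 *     0x64 = 100  ->  100 * 250 Mbps = 25.0 Gbps
 *     0x7D = 125  ->  125 * 100 Mbps = 12.5 Gbps
 *
 * i.e. the cable must advertise at least the enabled link speed.
 */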
279 
280 static int set_qsfp_high_power(struct hfi1_pportdata *ppd)
281 {
282 	u8 cable_power_class = 0, power_ctrl_byte = 0;
283 	u8 *cache = ppd->qsfp_info.cache;
284 	int ret;
285 
286 	cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);
287 
288 	if (cable_power_class > QSFP_POWER_CLASS_1) {
289 		power_ctrl_byte = cache[QSFP_PWR_CTRL_BYTE_OFFS];
290 
291 		power_ctrl_byte |= 1;
292 		power_ctrl_byte &= ~(0x2);
293 
294 		ret = qsfp_write(ppd, ppd->dd->hfi1_id,
295 				 QSFP_PWR_CTRL_BYTE_OFFS,
296 				 &power_ctrl_byte, 1);
297 		if (ret != 1)
298 			return -EIO;
299 
300 		if (cable_power_class > QSFP_POWER_CLASS_4) {
301 			power_ctrl_byte |= (1 << 2);
302 			ret = qsfp_write(ppd, ppd->dd->hfi1_id,
303 					 QSFP_PWR_CTRL_BYTE_OFFS,
304 					 &power_ctrl_byte, 1);
305 			if (ret != 1)
306 				return -EIO;
307 		}
308 
309 		/* SFF 8679 rev 1.7 LPMode Deassert time */
310 		msleep(300);
311 	}
312 	return 0;
313 }
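
/*
 * Rough map of the power control byte bits set above (per SFF-8636,
 * descriptive only):
 *
 *     bit 0  Power_override     set: host controls the power mode
 *     bit 1  Power_set          cleared: leave low-power mode
 *     bit 2  class 5-7 enable   additionally set for classes > 4
 *
 * followed by the SFF-8679 LPMode deassert settling time.
 */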
314 
315 static void apply_rx_cdr(struct hfi1_pportdata *ppd,
316 			 u32 rx_preset_index,
317 			 u8 *cdr_ctrl_byte)
318 {
319 	u32 rx_preset;
320 	u8 *cache = ppd->qsfp_info.cache;
321 	int cable_power_class;
322 
323 	if (!((cache[QSFP_MOD_PWR_OFFS] & 0x4) &&
324 	      (cache[QSFP_CDR_INFO_OFFS] & 0x40)))
325 		return;
326 
327 	/* RX CDR present, bypass supported */
328 	cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);
329 
330 	if (cable_power_class <= QSFP_POWER_CLASS_3) {
331 		/* Power class <= 3, ignore config & turn RX CDR on */
332 		*cdr_ctrl_byte |= 0xF;
333 		return;
334 	}
335 
336 	get_platform_config_field(
337 		ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
338 		rx_preset_index, RX_PRESET_TABLE_QSFP_RX_CDR_APPLY,
339 		&rx_preset, 4);
340 
341 	if (!rx_preset) {
342 		dd_dev_info(
343 			ppd->dd,
344 			"%s: RX_CDR_APPLY is set to disabled\n",
345 			__func__);
346 		return;
347 	}
348 	get_platform_config_field(
349 		ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
350 		rx_preset_index, RX_PRESET_TABLE_QSFP_RX_CDR,
351 		&rx_preset, 4);
352 
353 	/* Expand cdr setting to all 4 lanes */
354 	rx_preset = (rx_preset | (rx_preset << 1) |
355 			(rx_preset << 2) | (rx_preset << 3));
356 
357 	if (rx_preset) {
358 		*cdr_ctrl_byte |= rx_preset;
359 	} else {
360 		*cdr_ctrl_byte &= rx_preset;
361 		/* Preserve current TX CDR status */
362 		*cdr_ctrl_byte |= (cache[QSFP_CDR_CTRL_BYTE_OFFS] & 0xF0);
363 	}
364 }
365 
366 static void apply_tx_cdr(struct hfi1_pportdata *ppd,
367 			 u32 tx_preset_index,
368 			 u8 *cdr_ctrl_byte)
369 {
370 	u32 tx_preset;
371 	u8 *cache = ppd->qsfp_info.cache;
372 	int cable_power_class;
373 
374 	if (!((cache[QSFP_MOD_PWR_OFFS] & 0x8) &&
375 	      (cache[QSFP_CDR_INFO_OFFS] & 0x80)))
376 		return;
377 
378 	/* TX CDR present, bypass supported */
379 	cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);
380 
381 	if (cable_power_class <= QSFP_POWER_CLASS_3) {
382 		/* Power class <= 3, ignore config & turn TX CDR on */
383 		*cdr_ctrl_byte |= 0xF0;
384 		return;
385 	}
386 
387 	get_platform_config_field(
388 		ppd->dd,
389 		PLATFORM_CONFIG_TX_PRESET_TABLE, tx_preset_index,
390 		TX_PRESET_TABLE_QSFP_TX_CDR_APPLY, &tx_preset, 4);
391 
392 	if (!tx_preset) {
393 		dd_dev_info(
394 			ppd->dd,
395 			"%s: TX_CDR_APPLY is set to disabled\n",
396 			__func__);
397 		return;
398 	}
399 	get_platform_config_field(
400 		ppd->dd,
401 		PLATFORM_CONFIG_TX_PRESET_TABLE,
402 		tx_preset_index,
403 		TX_PRESET_TABLE_QSFP_TX_CDR, &tx_preset, 4);
404 
405 	/* Expand cdr setting to all 4 lanes */
406 	tx_preset = (tx_preset | (tx_preset << 1) |
407 			(tx_preset << 2) | (tx_preset << 3));
408 
409 	if (tx_preset)
410 		*cdr_ctrl_byte |= (tx_preset << 4);
411 	else
412 		/* Preserve current/determined RX CDR status */
413 		*cdr_ctrl_byte &= ((tx_preset << 4) | 0xF);
414 }
415 
416 static void apply_cdr_settings(
417 		struct hfi1_pportdata *ppd, u32 rx_preset_index,
418 		u32 tx_preset_index)
419 {
420 	u8 *cache = ppd->qsfp_info.cache;
421 	u8 cdr_ctrl_byte = cache[QSFP_CDR_CTRL_BYTE_OFFS];
422 
423 	apply_rx_cdr(ppd, rx_preset_index, &cdr_ctrl_byte);
424 
425 	apply_tx_cdr(ppd, tx_preset_index, &cdr_ctrl_byte);
426 
427 	qsfp_write(ppd, ppd->dd->hfi1_id, QSFP_CDR_CTRL_BYTE_OFFS,
428 		   &cdr_ctrl_byte, 1);
429 }
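
/*
 * CDR control byte layout handled above: low nibble = RX CDR enable per
 * lane, high nibble = TX CDR enable per lane.  The "expand to all 4
 * lanes" step is simply:
 *
 *     preset = 1  ->  1 | (1 << 1) | (1 << 2) | (1 << 3) = 0xF
 *
 * so a single 0/1 config value switches the CDRs for every lane.
 */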
430 
431 static void apply_tx_eq_auto(struct hfi1_pportdata *ppd)
432 {
433 	u8 *cache = ppd->qsfp_info.cache;
434 	u8 tx_eq;
435 
436 	if (!(cache[QSFP_EQ_INFO_OFFS] & 0x8))
437 		return;
438 	/* Disable adaptive TX EQ if present */
439 	tx_eq = cache[(128 * 3) + 241]; /* upper page 03, byte 241 (cached) */
440 	tx_eq &= 0xF0;
441 	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 241, &tx_eq, 1);
442 }
443 
444 static void apply_tx_eq_prog(struct hfi1_pportdata *ppd, u32 tx_preset_index)
445 {
446 	u8 *cache = ppd->qsfp_info.cache;
447 	u32 tx_preset;
448 	u8 tx_eq;
449 
450 	if (!(cache[QSFP_EQ_INFO_OFFS] & 0x4))
451 		return;
452 
453 	get_platform_config_field(
454 		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
455 		tx_preset_index, TX_PRESET_TABLE_QSFP_TX_EQ_APPLY,
456 		&tx_preset, 4);
457 	if (!tx_preset) {
458 		dd_dev_info(
459 			ppd->dd,
460 			"%s: TX_EQ_APPLY is set to disabled\n",
461 			__func__);
462 		return;
463 	}
464 	get_platform_config_field(
465 			ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
466 			tx_preset_index, TX_PRESET_TABLE_QSFP_TX_EQ,
467 			&tx_preset, 4);
468 
469 	if (((cache[(128 * 3) + 224] & 0xF0) >> 4) < tx_preset) {
470 		dd_dev_info(
471 			ppd->dd,
472 			"%s: TX EQ %x unsupported\n",
473 			__func__, tx_preset);
474 
475 		dd_dev_info(
476 			ppd->dd,
477 			"%s: Applying EQ %x\n",
478 			__func__, cache[608] & 0xF0);
479 
480 		tx_preset = (cache[608] & 0xF0) >> 4;
481 	}
482 
483 	tx_eq = tx_preset | (tx_preset << 4);
484 	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 234, &tx_eq, 1);
485 	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 235, &tx_eq, 1);
486 }
487 
488 static void apply_rx_eq_emp(struct hfi1_pportdata *ppd, u32 rx_preset_index)
489 {
490 	u32 rx_preset;
491 	u8 rx_eq, *cache = ppd->qsfp_info.cache;
492 
493 	if (!(cache[QSFP_EQ_INFO_OFFS] & 0x2))
494 		return;
495 	get_platform_config_field(
496 			ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
497 			rx_preset_index, RX_PRESET_TABLE_QSFP_RX_EMP_APPLY,
498 			&rx_preset, 4);
499 
500 	if (!rx_preset) {
501 		dd_dev_info(
502 			ppd->dd,
503 			"%s: RX_EMP_APPLY is set to disabled\n",
504 			__func__);
505 		return;
506 	}
507 	get_platform_config_field(
508 		ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
509 		rx_preset_index, RX_PRESET_TABLE_QSFP_RX_EMP,
510 		&rx_preset, 4);
511 
512 	if ((cache[(128 * 3) + 224] & 0xF) < rx_preset) {
513 		dd_dev_info(
514 			ppd->dd,
515 			"%s: Requested RX EMP %x\n",
516 			__func__, rx_preset);
517 
518 		dd_dev_info(
519 			ppd->dd,
520 			"%s: Applying supported EMP %x\n",
521 			__func__, cache[608] & 0xF);
522 
523 		rx_preset = cache[608] & 0xF;
524 	}
525 
526 	rx_eq = rx_preset | (rx_preset << 4);
527 
528 	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 236, &rx_eq, 1);
529 	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 237, &rx_eq, 1);
530 }
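
/*
 * Note on the EQ/EMP writes above: each control register packs two
 * lanes per byte, one nibble per lane, so writing
 *
 *     val = preset | (preset << 4)
 *
 * to two consecutive bytes programs all four lanes with the same code.
 * The cache index (128 * 3) + 224 (a.k.a. 608) is the upper page 03
 * byte advertising the maximum supported TX EQ (high nibble) and RX
 * emphasis (low nibble).
 */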
531 
532 static void apply_eq_settings(struct hfi1_pportdata *ppd,
533 			      u32 rx_preset_index, u32 tx_preset_index)
534 {
535 	u8 *cache = ppd->qsfp_info.cache;
536 
537 	/* nothing to do without upper page 03 (flat memory module) */
538 	if (cache[2] & 4) {
539 		dd_dev_info(ppd->dd,
540 			    "%s: Upper page 03 not present\n",
541 			    __func__);
542 		return;
543 	}
544 
545 	apply_tx_eq_auto(ppd);
546 
547 	apply_tx_eq_prog(ppd, tx_preset_index);
548 
549 	apply_rx_eq_emp(ppd, rx_preset_index);
550 }
551 
552 static void apply_rx_amplitude_settings(
553 		struct hfi1_pportdata *ppd, u32 rx_preset_index,
554 		u32 tx_preset_index)
555 {
556 	u32 rx_preset;
557 	u8 rx_amp = 0, i = 0, preferred = 0, *cache = ppd->qsfp_info.cache;
558 
559 	/* nothing to do without upper page 03 (flat memory module) */
560 	if (cache[2] & 4) {
561 		dd_dev_info(ppd->dd,
562 			    "%s: Upper page 03 not present\n",
563 			    __func__);
564 		return;
565 	}
566 	if (!(cache[QSFP_EQ_INFO_OFFS] & 0x1)) {
567 		dd_dev_info(ppd->dd,
568 			    "%s: Module does not support RX amplitude control\n",
569 			    __func__);
570 		return;
571 	}
572 
573 	get_platform_config_field(ppd->dd,
574 				  PLATFORM_CONFIG_RX_PRESET_TABLE,
575 				  rx_preset_index,
576 				  RX_PRESET_TABLE_QSFP_RX_AMP_APPLY,
577 				  &rx_preset, 4);
578 
579 	if (!rx_preset) {
580 		dd_dev_info(ppd->dd,
581 			    "%s: RX_AMP_APPLY is set to disabled\n",
582 			    __func__);
583 		return;
584 	}
585 	get_platform_config_field(ppd->dd,
586 				  PLATFORM_CONFIG_RX_PRESET_TABLE,
587 				  rx_preset_index,
588 				  RX_PRESET_TABLE_QSFP_RX_AMP,
589 				  &rx_preset, 4);
590 
591 	dd_dev_info(ppd->dd,
592 		    "%s: Requested RX AMP %x\n",
593 		    __func__,
594 		    rx_preset);
595 
596 	for (i = 0; i < 4; i++) {
597 		if (cache[(128 * 3) + 225] & (1 << i)) {
598 			preferred = i;
599 			if (preferred == rx_preset)
600 				break;
601 		}
602 	}
603 
604 	/*
605 	 * Verify that preferred RX amplitude is not just a
606 	 * fall through of the default
607 	 */
608 	if (!preferred && !(cache[(128 * 3) + 225] & 0x1)) {
609 		dd_dev_info(ppd->dd, "No supported RX AMP, not applying\n");
610 		return;
611 	}
612 
613 	dd_dev_info(ppd->dd,
614 		    "%s: Applying RX AMP %x\n", __func__, preferred);
615 
616 	rx_amp = preferred | (preferred << 4);
617 	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 238, &rx_amp, 1);
618 	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 239, &rx_amp, 1);
619 }
620 
621 #define OPA_INVALID_INDEX 0xFFF
622 
623 static void apply_tx_lanes(struct hfi1_pportdata *ppd, u8 field_id,
624 			   u32 config_data, const char *message)
625 {
626 	u8 i;
627 	int ret = HCMD_SUCCESS;
628 
629 	for (i = 0; i < 4; i++) {
630 		ret = load_8051_config(ppd->dd, field_id, i, config_data);
631 		if (ret != HCMD_SUCCESS) {
632 			dd_dev_err(
633 				ppd->dd,
634 				"%s: %s for lane %u failed\n",
635 				message, __func__, i);
636 		}
637 	}
638 }
639 
640 /*
641  * Return a special SerDes setting for low power AOC cables.  The power class
642  * threshold and setting being used were all found by empirical testing.
643  *
644  * Summary of the logic:
645  *
646  * if (QSFP and QSFP_TYPE == AOC and QSFP_POWER_CLASS < 4)
647  *     return 0xe
648  * return 0; // leave at default
649  */
650 static u8 aoc_low_power_setting(struct hfi1_pportdata *ppd)
651 {
652 	u8 *cache = ppd->qsfp_info.cache;
653 	int power_class;
654 
655 	/* QSFP only */
656 	if (ppd->port_type != PORT_TYPE_QSFP)
657 		return 0; /* leave at default */
658 
659 	/* active optical cables only */
660 	switch ((cache[QSFP_MOD_TECH_OFFS] & 0xF0) >> 4) {
661 	case 0x0 ... 0x9: /* fallthrough */
662 	case 0xC: /* fallthrough */
663 	case 0xE:
664 		/* active AOC */
665 		power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);
666 		if (power_class < QSFP_POWER_CLASS_4)
667 			return 0xe;
668 	}
669 	return 0; /* leave at default */
670 }
671 
672 static void apply_tunings(
673 		struct hfi1_pportdata *ppd, u32 tx_preset_index,
674 		u8 tuning_method, u32 total_atten, u8 limiting_active)
675 {
676 	int ret = 0;
677 	u32 config_data = 0, tx_preset = 0;
678 	u8 precur = 0, attn = 0, postcur = 0, external_device_config = 0;
679 	u8 *cache = ppd->qsfp_info.cache;
680 
681 	/* Pass tuning method to 8051 */
682 	read_8051_config(ppd->dd, LINK_TUNING_PARAMETERS, GENERAL_CONFIG,
683 			 &config_data);
684 	config_data &= ~(0xff << TUNING_METHOD_SHIFT);
685 	config_data |= ((u32)tuning_method << TUNING_METHOD_SHIFT);
686 	ret = load_8051_config(ppd->dd, LINK_TUNING_PARAMETERS, GENERAL_CONFIG,
687 			       config_data);
688 	if (ret != HCMD_SUCCESS)
689 		dd_dev_err(ppd->dd, "%s: Failed to set tuning method\n",
690 			   __func__);
691 
692 	/* Set same channel loss for both TX and RX */
693 	config_data = 0 | (total_atten << 16) | (total_atten << 24);
694 	apply_tx_lanes(ppd, CHANNEL_LOSS_SETTINGS, config_data,
695 		       "Setting channel loss");
696 
697 	/* Inform 8051 of cable capabilities */
698 	if (ppd->qsfp_info.cache_valid) {
699 		external_device_config =
700 			((cache[QSFP_MOD_PWR_OFFS] & 0x4) << 3) |
701 			((cache[QSFP_MOD_PWR_OFFS] & 0x8) << 2) |
702 			((cache[QSFP_EQ_INFO_OFFS] & 0x2) << 1) |
703 			(cache[QSFP_EQ_INFO_OFFS] & 0x4);
704 		ret = read_8051_config(ppd->dd, DC_HOST_COMM_SETTINGS,
705 				       GENERAL_CONFIG, &config_data);
706 		/* Clear, then set the external device config field */
707 		config_data &= ~(u32)0xFF;
708 		config_data |= external_device_config;
709 		ret = load_8051_config(ppd->dd, DC_HOST_COMM_SETTINGS,
710 				       GENERAL_CONFIG, config_data);
711 		if (ret != HCMD_SUCCESS)
712 			dd_dev_info(ppd->dd,
713 				    "%s: Failed to set ext device config params\n",
714 				    __func__);
715 	}
716 
717 	if (tx_preset_index == OPA_INVALID_INDEX) {
718 		if (ppd->port_type == PORT_TYPE_QSFP && limiting_active)
719 			dd_dev_info(ppd->dd, "%s: Invalid Tx preset index\n",
720 				    __func__);
721 		return;
722 	}
723 
724 	/* Following for limiting active channels only */
725 	get_platform_config_field(
726 		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE, tx_preset_index,
727 		TX_PRESET_TABLE_PRECUR, &tx_preset, 4);
728 	precur = tx_preset;
729 
730 	get_platform_config_field(
731 		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
732 		tx_preset_index, TX_PRESET_TABLE_ATTN, &tx_preset, 4);
733 	attn = tx_preset;
734 
735 	get_platform_config_field(
736 		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
737 		tx_preset_index, TX_PRESET_TABLE_POSTCUR, &tx_preset, 4);
738 	postcur = tx_preset;
739 
740 	/*
741 	 * NOTES:
742 	 * o The aoc_low_power_setting is applied to all lanes even
743 	 *   though only lane 0's value is examined by the firmware.
744 	 * o A lingering low power setting after a cable swap does
745 	 *   not occur.  On cable unplug the 8051 is reset and
746 	 *   restarted on cable insert.  This resets all settings to
747 	 *   their default, erasing any previous low power setting.
748 	 */
749 	config_data = precur | (attn << 8) | (postcur << 16) |
750 			(aoc_low_power_setting(ppd) << 24);
751 
752 	apply_tx_lanes(ppd, TX_EQ_SETTINGS, config_data,
753 		       "Applying TX settings");
754 }
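
/*
 * Byte packing of the 8051 config words built above (a sketch of the
 * code, not a firmware interface spec):
 *
 *     CHANNEL_LOSS_SETTINGS: (total_atten << 16) | (total_atten << 24)
 *                            i.e. the same loss applied to RX and TX
 *     TX_EQ_SETTINGS:        precur | (attn << 8) | (postcur << 16) |
 *                            (aoc_low_power_setting(ppd) << 24)
 */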
755 
756 /* Must be holding the QSFP i2c resource */
757 static int tune_active_qsfp(struct hfi1_pportdata *ppd, u32 *ptr_tx_preset,
758 			    u32 *ptr_rx_preset, u32 *ptr_total_atten)
759 {
760 	int ret;
761 	u16 lss = ppd->link_speed_supported, lse = ppd->link_speed_enabled;
762 	u8 *cache = ppd->qsfp_info.cache;
763 
764 	ppd->qsfp_info.limiting_active = 1;
765 
766 	ret = set_qsfp_tx(ppd, 0);
767 	if (ret)
768 		return ret;
769 
770 	ret = qual_power(ppd);
771 	if (ret)
772 		return ret;
773 
774 	ret = qual_bitrate(ppd);
775 	if (ret)
776 		return ret;
777 
778 	/*
779 	 * The QSFP memory is modified from here on out.  If a previous pass
780 	 * already modified it, reset the module now so stale settings are not
781 	 * reused; otherwise set the flag so the next pass knows to reset it.
782 	 */
783 	if (ppd->qsfp_info.reset_needed) {
784 		reset_qsfp(ppd);
785 		refresh_qsfp_cache(ppd, &ppd->qsfp_info);
786 	} else {
787 		ppd->qsfp_info.reset_needed = 1;
788 	}
789 
790 	ret = set_qsfp_high_power(ppd);
791 	if (ret)
792 		return ret;
793 
794 	if (cache[QSFP_EQ_INFO_OFFS] & 0x4) {
795 		ret = get_platform_config_field(
796 			ppd->dd,
797 			PLATFORM_CONFIG_PORT_TABLE, 0,
798 			PORT_TABLE_TX_PRESET_IDX_ACTIVE_EQ,
799 			ptr_tx_preset, 4);
800 		if (ret) {
801 			*ptr_tx_preset = OPA_INVALID_INDEX;
802 			return ret;
803 		}
804 	} else {
805 		ret = get_platform_config_field(
806 			ppd->dd,
807 			PLATFORM_CONFIG_PORT_TABLE, 0,
808 			PORT_TABLE_TX_PRESET_IDX_ACTIVE_NO_EQ,
809 			ptr_tx_preset, 4);
810 		if (ret) {
811 			*ptr_tx_preset = OPA_INVALID_INDEX;
812 			return ret;
813 		}
814 	}
815 
816 	ret = get_platform_config_field(
817 		ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
818 		PORT_TABLE_RX_PRESET_IDX, ptr_rx_preset, 4);
819 	if (ret) {
820 		*ptr_rx_preset = OPA_INVALID_INDEX;
821 		return ret;
822 	}
823 
824 	if ((lss & OPA_LINK_SPEED_25G) && (lse & OPA_LINK_SPEED_25G))
825 		get_platform_config_field(
826 			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
827 			PORT_TABLE_LOCAL_ATTEN_25G, ptr_total_atten, 4);
828 	else if ((lss & OPA_LINK_SPEED_12_5G) && (lse & OPA_LINK_SPEED_12_5G))
829 		get_platform_config_field(
830 			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
831 			PORT_TABLE_LOCAL_ATTEN_12G, ptr_total_atten, 4);
832 
833 	apply_cdr_settings(ppd, *ptr_rx_preset, *ptr_tx_preset);
834 
835 	apply_eq_settings(ppd, *ptr_rx_preset, *ptr_tx_preset);
836 
837 	apply_rx_amplitude_settings(ppd, *ptr_rx_preset, *ptr_tx_preset);
838 
839 	ret = set_qsfp_tx(ppd, 1);
840 
841 	return ret;
842 }
843 
844 static int tune_qsfp(struct hfi1_pportdata *ppd,
845 		     u32 *ptr_tx_preset, u32 *ptr_rx_preset,
846 		     u8 *ptr_tuning_method, u32 *ptr_total_atten)
847 {
848 	u32 cable_atten = 0, remote_atten = 0, platform_atten = 0;
849 	u16 lss = ppd->link_speed_supported, lse = ppd->link_speed_enabled;
850 	int ret = 0;
851 	u8 *cache = ppd->qsfp_info.cache;
852 
853 	switch ((cache[QSFP_MOD_TECH_OFFS] & 0xF0) >> 4) {
854 	case 0xA ... 0xB:
855 		ret = get_platform_config_field(
856 			ppd->dd,
857 			PLATFORM_CONFIG_PORT_TABLE, 0,
858 			PORT_TABLE_LOCAL_ATTEN_25G,
859 			&platform_atten, 4);
860 		if (ret)
861 			return ret;
862 
863 		if ((lss & OPA_LINK_SPEED_25G) && (lse & OPA_LINK_SPEED_25G))
864 			cable_atten = cache[QSFP_CU_ATTEN_12G_OFFS];
865 		else if ((lss & OPA_LINK_SPEED_12_5G) &&
866 			 (lse & OPA_LINK_SPEED_12_5G))
867 			cable_atten = cache[QSFP_CU_ATTEN_7G_OFFS];
868 
869 		/* Fallback to configured attenuation if cable memory is bad */
870 		if (cable_atten == 0 || cable_atten > 36) {
871 			ret = get_platform_config_field(
872 				ppd->dd,
873 				PLATFORM_CONFIG_SYSTEM_TABLE, 0,
874 				SYSTEM_TABLE_QSFP_ATTENUATION_DEFAULT_25G,
875 				&cable_atten, 4);
876 			if (ret)
877 				return ret;
878 		}
879 
880 		ret = get_platform_config_field(
881 			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
882 			PORT_TABLE_REMOTE_ATTEN_25G, &remote_atten, 4);
883 		if (ret)
884 			return ret;
885 
886 		*ptr_total_atten = platform_atten + cable_atten + remote_atten;
887 
888 		*ptr_tuning_method = OPA_PASSIVE_TUNING;
889 		break;
890 	case 0x0 ... 0x9: /* fallthrough */
891 	case 0xC: /* fallthrough */
892 	case 0xE:
893 		ret = tune_active_qsfp(ppd, ptr_tx_preset, ptr_rx_preset,
894 				       ptr_total_atten);
895 		if (ret)
896 			return ret;
897 
898 		*ptr_tuning_method = OPA_ACTIVE_TUNING;
899 		break;
900 	case 0xD: /* fallthrough */
901 	case 0xF:
902 	default:
903 		dd_dev_info(ppd->dd, "%s: Unknown/unsupported cable\n",
904 			    __func__);
905 		break;
906 	}
907 	return ret;
908 }
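
/*
 * The switch above keys off the upper nibble of the device technology
 * byte.  Roughly, per the SFF-8636 encodings:
 *
 *     0x0 - 0x9   optical / AOC            -> active tuning
 *     0xA - 0xB   passive copper           -> passive tuning
 *     0xC, 0xE    limiting active copper   -> active tuning
 *     0xD, 0xF    other active copper      -> unknown/unsupported
 */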
909 
910 /*
911  * This function communicates its success or failure via
912  * ppd->driver_link_ready.  It must be paired with start_link(), which
913  * checks driver_link_ready before proceeding with the link negotiation
914  * and initialization process.
915  */
916 void tune_serdes(struct hfi1_pportdata *ppd)
917 {
918 	int ret = 0;
919 	u32 total_atten = 0;
920 	u32 remote_atten = 0, platform_atten = 0;
921 	u32 rx_preset_index, tx_preset_index;
922 	u8 tuning_method = 0, limiting_active = 0;
923 	struct hfi1_devdata *dd = ppd->dd;
924 
925 	rx_preset_index = OPA_INVALID_INDEX;
926 	tx_preset_index = OPA_INVALID_INDEX;
927 
928 	/* the link defaults to enabled */
929 	ppd->link_enabled = 1;
930 	/* the driver link ready state defaults to not ready */
931 	ppd->driver_link_ready = 0;
932 	ppd->offline_disabled_reason = HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
933 
934 	/* Skip the tuning for testing (loopback != none) and simulations */
935 	if (loopback != LOOPBACK_NONE ||
936 	    ppd->dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
937 		ppd->driver_link_ready = 1;
938 		return;
939 	}
940 
941 	switch (ppd->port_type) {
942 	case PORT_TYPE_DISCONNECTED:
943 		ppd->offline_disabled_reason =
944 			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_DISCONNECTED);
945 		dd_dev_info(dd, "%s: Port disconnected, disabling port\n",
946 			    __func__);
947 		goto bail;
948 	case PORT_TYPE_FIXED:
949 		/* platform_atten, remote_atten pre-zeroed to catch error */
950 		get_platform_config_field(
951 			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
952 			PORT_TABLE_LOCAL_ATTEN_25G, &platform_atten, 4);
953 
954 		get_platform_config_field(
955 			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
956 			PORT_TABLE_REMOTE_ATTEN_25G, &remote_atten, 4);
957 
958 		total_atten = platform_atten + remote_atten;
959 
960 		tuning_method = OPA_PASSIVE_TUNING;
961 		break;
962 	case PORT_TYPE_VARIABLE:
963 		if (qsfp_mod_present(ppd)) {
964 			/*
965 			 * platform_atten, remote_atten pre-zeroed to
966 			 * catch error
967 			 */
968 			get_platform_config_field(
969 				ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
970 				PORT_TABLE_LOCAL_ATTEN_25G,
971 				&platform_atten, 4);
972 
973 			get_platform_config_field(
974 				ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
975 				PORT_TABLE_REMOTE_ATTEN_25G,
976 				&remote_atten, 4);
977 
978 			total_atten = platform_atten + remote_atten;
979 
980 			tuning_method = OPA_PASSIVE_TUNING;
981 		} else {
982 			ppd->offline_disabled_reason =
983 			     HFI1_ODR_MASK(OPA_LINKDOWN_REASON_CHASSIS_CONFIG);
984 			goto bail;
985 		}
986 		break;
987 	case PORT_TYPE_QSFP:
988 		if (qsfp_mod_present(ppd)) {
989 			ret = acquire_chip_resource(ppd->dd,
990 						    qsfp_resource(ppd->dd),
991 						    QSFP_WAIT);
992 			if (ret) {
993 				dd_dev_err(ppd->dd, "%s: hfi%d: cannot lock i2c chain\n",
994 					   __func__, (int)ppd->dd->hfi1_id);
995 				goto bail;
996 			}
997 			refresh_qsfp_cache(ppd, &ppd->qsfp_info);
998 
999 			if (ppd->qsfp_info.cache_valid) {
1000 				ret = tune_qsfp(ppd,
1001 						&tx_preset_index,
1002 						&rx_preset_index,
1003 						&tuning_method,
1004 						&total_atten);
1005 
1006 				/*
1007 				 * We may have modified the QSFP memory, so
1008 				 * update the cache to reflect the changes
1009 				 */
1010 				refresh_qsfp_cache(ppd, &ppd->qsfp_info);
1011 				limiting_active =
1012 						ppd->qsfp_info.limiting_active;
1013 			} else {
1014 				dd_dev_err(dd,
1015 					   "%s: Reading QSFP memory failed\n",
1016 					   __func__);
1017 				ret = -EINVAL; /* a fail indication */
1018 			}
1019 			release_chip_resource(ppd->dd, qsfp_resource(ppd->dd));
1020 			if (ret)
1021 				goto bail;
1022 		} else {
1023 			ppd->offline_disabled_reason =
1024 			   HFI1_ODR_MASK(
1025 				OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
1026 			goto bail;
1027 		}
1028 		break;
1029 	default:
1030 		dd_dev_info(ppd->dd, "%s: Unknown port type\n", __func__);
1031 		ppd->port_type = PORT_TYPE_UNKNOWN;
1032 		tuning_method = OPA_UNKNOWN_TUNING;
1033 		total_atten = 0;
1034 		limiting_active = 0;
1035 		tx_preset_index = OPA_INVALID_INDEX;
1036 		break;
1037 	}
1038 
1039 	if (ppd->offline_disabled_reason ==
1040 			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
1041 		apply_tunings(ppd, tx_preset_index, tuning_method,
1042 			      total_atten, limiting_active);
1043 
1044 	if (!ret)
1045 		ppd->driver_link_ready = 1;
1046 
1047 	return;
1048 bail:
1049 	ppd->driver_link_ready = 0;
1050 }
1051