xref: /linux/drivers/infiniband/hw/hfi1/platform.c (revision 0ea5c948cb64bab5bc7a5516774eb8536f05aa0d)
1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /*
3  * Copyright(c) 2015, 2016 Intel Corporation.
4  */
5 
6 #include <linux/firmware.h>
7 
8 #include "hfi.h"
9 #include "efivar.h"
10 #include "eprom.h"
11 
12 #define DEFAULT_PLATFORM_CONFIG_NAME "hfi1_platform.dat"
13 
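/*
 * The platform configuration bitmap for integrated devices is written into
 * the ASIC_CFG_SCRATCH CSRs by system firmware (BIOS).  The routine below
 * folds every 16-bit chunk of the scratch registers, plus the bitmap
 * version field, with end-around carry, and accepts the bitmap only when
 * the folded sum and the stored checksum field are one's complements of
 * each other (sum + stored checksum == 0xFFFF).
 */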
14 static int validate_scratch_checksum(struct hfi1_devdata *dd)
15 {
16 	u64 checksum = 0, temp_scratch = 0;
17 	int i, j, version;
18 
19 	temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH);
20 	version = (temp_scratch & BITMAP_VERSION_SMASK) >> BITMAP_VERSION_SHIFT;
21 
22 	/* Prevent the power-on default of all zeroes from passing the checksum */
23 	if (!version) {
24 		dd_dev_err(dd, "%s: Config bitmap uninitialized\n", __func__);
25 		dd_dev_err(dd,
26 			   "%s: Please update your BIOS to support active channels\n",
27 			   __func__);
28 		return 0;
29 	}
30 
31 	/*
32 	 * ASIC scratch 0 only contains the checksum and bitmap version as
33 	 * fields of interest, both of which are handled separately from the
34 	 * loop below, so skip it
35 	 */
36 	checksum += version;
37 	for (i = 1; i < ASIC_NUM_SCRATCH; i++) {
38 		temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH + (8 * i));
39 		for (j = sizeof(u64); j != 0; j -= 2) {
40 			checksum += (temp_scratch & 0xFFFF);
41 			temp_scratch >>= 16;
42 		}
43 	}
44 
45 	while (checksum >> 16)
46 		checksum = (checksum & CHECKSUM_MASK) + (checksum >> 16);
47 
48 	temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH);
49 	temp_scratch &= CHECKSUM_SMASK;
50 	temp_scratch >>= CHECKSUM_SHIFT;
51 
52 	if (checksum + temp_scratch == 0xFFFF)
53 		return 1;
54 
55 	dd_dev_err(dd, "%s: Configuration bitmap corrupted\n", __func__);
56 	return 0;
57 }
58 
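/*
 * Copy the per-port fields of a validated configuration bitmap out of the
 * scratch CSRs: port type, local/remote/default attenuation, TX/RX preset
 * indices and the maximum QSFP power class.  The PORT0_*/PORT1_* masks and
 * shifts are selected by dd->hfi1_id, i.e. by which port of the ASIC this
 * device instance represents.  config_from_scratch tells later lookups
 * that the scratch registers, not a platform config file, are the source.
 */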
59 static void save_platform_config_fields(struct hfi1_devdata *dd)
60 {
61 	struct hfi1_pportdata *ppd = dd->pport;
62 	u64 temp_scratch = 0, temp_dest = 0;
63 
64 	temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH_1);
65 
66 	temp_dest = temp_scratch &
67 		    (dd->hfi1_id ? PORT1_PORT_TYPE_SMASK :
68 		     PORT0_PORT_TYPE_SMASK);
69 	ppd->port_type = temp_dest >>
70 			 (dd->hfi1_id ? PORT1_PORT_TYPE_SHIFT :
71 			  PORT0_PORT_TYPE_SHIFT);
72 
73 	temp_dest = temp_scratch &
74 		    (dd->hfi1_id ? PORT1_LOCAL_ATTEN_SMASK :
75 		     PORT0_LOCAL_ATTEN_SMASK);
76 	ppd->local_atten = temp_dest >>
77 			   (dd->hfi1_id ? PORT1_LOCAL_ATTEN_SHIFT :
78 			    PORT0_LOCAL_ATTEN_SHIFT);
79 
80 	temp_dest = temp_scratch &
81 		    (dd->hfi1_id ? PORT1_REMOTE_ATTEN_SMASK :
82 		     PORT0_REMOTE_ATTEN_SMASK);
83 	ppd->remote_atten = temp_dest >>
84 			    (dd->hfi1_id ? PORT1_REMOTE_ATTEN_SHIFT :
85 			     PORT0_REMOTE_ATTEN_SHIFT);
86 
87 	temp_dest = temp_scratch &
88 		    (dd->hfi1_id ? PORT1_DEFAULT_ATTEN_SMASK :
89 		     PORT0_DEFAULT_ATTEN_SMASK);
90 	ppd->default_atten = temp_dest >>
91 			     (dd->hfi1_id ? PORT1_DEFAULT_ATTEN_SHIFT :
92 			      PORT0_DEFAULT_ATTEN_SHIFT);
93 
94 	temp_scratch = read_csr(dd, dd->hfi1_id ? ASIC_CFG_SCRATCH_3 :
95 				ASIC_CFG_SCRATCH_2);
96 
97 	ppd->tx_preset_eq = (temp_scratch & TX_EQ_SMASK) >> TX_EQ_SHIFT;
98 	ppd->tx_preset_noeq = (temp_scratch & TX_NO_EQ_SMASK) >> TX_NO_EQ_SHIFT;
99 	ppd->rx_preset = (temp_scratch & RX_SMASK) >> RX_SHIFT;
100 
101 	ppd->max_power_class = (temp_scratch & QSFP_MAX_POWER_SMASK) >>
102 				QSFP_MAX_POWER_SHIFT;
103 
104 	ppd->config_from_scratch = true;
105 }
106 
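/*
 * Locate the platform configuration, trying the sources in order of
 * preference: the scratch register bitmap (integrated devices only, and
 * only if its checksum validates), then the on-board EPROM, and finally
 * the generic hfi1_platform.dat firmware file, which is expected to give
 * sub-optimal but functional defaults.
 */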
107 void get_platform_config(struct hfi1_devdata *dd)
108 {
109 	int ret = 0;
110 	u8 *temp_platform_config = NULL;
111 	u32 esize;
112 	const struct firmware *platform_config_file = NULL;
113 
114 	if (is_integrated(dd)) {
115 		if (validate_scratch_checksum(dd)) {
116 			save_platform_config_fields(dd);
117 			return;
118 		}
119 	} else {
120 		ret = eprom_read_platform_config(dd,
121 						 (void **)&temp_platform_config,
122 						 &esize);
123 		if (!ret) {
124 			/* success */
125 			dd->platform_config.data = temp_platform_config;
126 			dd->platform_config.size = esize;
127 			return;
128 		}
129 	}
130 	dd_dev_err(dd,
131 		   "%s: Failed to get platform config, falling back to sub-optimal default file\n",
132 		   __func__);
133 
134 	ret = request_firmware(&platform_config_file,
135 			       DEFAULT_PLATFORM_CONFIG_NAME,
136 			       &dd->pcidev->dev);
137 	if (ret) {
138 		dd_dev_err(dd,
139 			   "%s: No default platform config file found\n",
140 			   __func__);
141 		return;
142 	}
143 
144 	/*
145 	 * Allocate separate memory block to store data and free firmware
146 	 * structure. This allows free_platform_config to treat EPROM and
147 	 * fallback configs in the same manner.
148 	 */
149 	dd->platform_config.data = kmemdup(platform_config_file->data,
150 					   platform_config_file->size,
151 					   GFP_KERNEL);
152 	dd->platform_config.size = platform_config_file->size;
153 	release_firmware(platform_config_file);
154 }
155 
156 void free_platform_config(struct hfi1_devdata *dd)
157 {
158 	/* Release memory allocated for eprom or fallback file read. */
159 	kfree(dd->platform_config.data);
160 	dd->platform_config.data = NULL;
161 }
162 
163 void get_port_type(struct hfi1_pportdata *ppd)
164 {
165 	int ret;
166 	u32 temp;
167 
168 	ret = get_platform_config_field(ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
169 					PORT_TABLE_PORT_TYPE, &temp,
170 					4);
171 	if (ret) {
172 		ppd->port_type = PORT_TYPE_UNKNOWN;
173 		return;
174 	}
175 	ppd->port_type = temp;
176 }
177 
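/*
 * Enable or disable the transmitters of all four QSFP lanes by writing the
 * module's TX control byte; going by the SFF-8636 TX-disable semantics,
 * 0x0 clears the per-lane disable bits and 0xF sets them.  qsfp_write()
 * returns the number of bytes written, so a return of 1 is mapped to
 * success, 0 to -EIO, and negative errors are passed through.
 */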
178 int set_qsfp_tx(struct hfi1_pportdata *ppd, int on)
179 {
180 	u8 tx_ctrl_byte = on ? 0x0 : 0xF;
181 	int ret = 0;
182 
183 	ret = qsfp_write(ppd, ppd->dd->hfi1_id, QSFP_TX_CTRL_BYTE_OFFS,
184 			 &tx_ctrl_byte, 1);
185 	/* we expected 1, so consider 0 an error */
186 	if (ret == 0)
187 		ret = -EIO;
188 	else if (ret == 1)
189 		ret = 0;
190 	return ret;
191 }
192 
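/*
 * Check the module's advertised power class against the platform's
 * SYSTEM_TABLE_QSFP_POWER_CLASS_MAX limit.  A module that exceeds the
 * limit leaves the port offline with a POWER_POLICY linkdown reason and
 * fails the tuning with -EPERM.
 */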
193 static int qual_power(struct hfi1_pportdata *ppd)
194 {
195 	u32 cable_power_class = 0, power_class_max = 0;
196 	u8 *cache = ppd->qsfp_info.cache;
197 	int ret = 0;
198 
199 	ret = get_platform_config_field(
200 		ppd->dd, PLATFORM_CONFIG_SYSTEM_TABLE, 0,
201 		SYSTEM_TABLE_QSFP_POWER_CLASS_MAX, &power_class_max, 4);
202 	if (ret)
203 		return ret;
204 
205 	cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);
206 
207 	if (cable_power_class > power_class_max)
208 		ppd->offline_disabled_reason =
209 			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_POWER_POLICY);
210 
211 	if (ppd->offline_disabled_reason ==
212 			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_POWER_POLICY)) {
213 		dd_dev_err(
214 			ppd->dd,
215 			"%s: Port disabled due to system power restrictions\n",
216 			__func__);
217 		ret = -EPERM;
218 	}
219 	return ret;
220 }
221 
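/*
 * Sanity-check the module's nominal bit rate against the enabled link
 * speeds.  Judging by the field names, QSFP_NOM_BIT_RATE_250_OFFS is in
 * units of 250 Mbps (0x64 = 100 -> 25 Gbps) and QSFP_NOM_BIT_RATE_100_OFFS
 * in units of 100 Mbps (0x7D = 125 -> 12.5 Gbps); a cable too slow for an
 * enabled speed keeps the port offline with a LINKSPEED_POLICY reason.
 */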
222 static int qual_bitrate(struct hfi1_pportdata *ppd)
223 {
224 	u16 lss = ppd->link_speed_supported, lse = ppd->link_speed_enabled;
225 	u8 *cache = ppd->qsfp_info.cache;
226 
227 	if ((lss & OPA_LINK_SPEED_25G) && (lse & OPA_LINK_SPEED_25G) &&
228 	    cache[QSFP_NOM_BIT_RATE_250_OFFS] < 0x64)
229 		ppd->offline_disabled_reason =
230 			   HFI1_ODR_MASK(OPA_LINKDOWN_REASON_LINKSPEED_POLICY);
231 
232 	if ((lss & OPA_LINK_SPEED_12_5G) && (lse & OPA_LINK_SPEED_12_5G) &&
233 	    cache[QSFP_NOM_BIT_RATE_100_OFFS] < 0x7D)
234 		ppd->offline_disabled_reason =
235 			   HFI1_ODR_MASK(OPA_LINKDOWN_REASON_LINKSPEED_POLICY);
236 
237 	if (ppd->offline_disabled_reason ==
238 			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_LINKSPEED_POLICY)) {
239 		dd_dev_err(
240 			ppd->dd,
241 			"%s: Cable failed bitrate check, disabling port\n",
242 			__func__);
243 		return -EPERM;
244 	}
245 	return 0;
246 }
247 
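/*
 * Take a module out of low power mode when its power class requires it.
 * Assuming QSFP_PWR_CTRL_BYTE_OFFS maps to the SFF-8636 power control
 * byte, bit 0 (power override) is set, bit 1 (power set low) is cleared,
 * and bit 2 (high power class enable) is additionally set for classes
 * 5-7, followed by the LPMode deassert wait below.
 */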
248 static int set_qsfp_high_power(struct hfi1_pportdata *ppd)
249 {
250 	u8 cable_power_class = 0, power_ctrl_byte = 0;
251 	u8 *cache = ppd->qsfp_info.cache;
252 	int ret;
253 
254 	cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);
255 
256 	if (cable_power_class > QSFP_POWER_CLASS_1) {
257 		power_ctrl_byte = cache[QSFP_PWR_CTRL_BYTE_OFFS];
258 
259 		power_ctrl_byte |= 1;
260 		power_ctrl_byte &= ~(0x2);
261 
262 		ret = qsfp_write(ppd, ppd->dd->hfi1_id,
263 				 QSFP_PWR_CTRL_BYTE_OFFS,
264 				 &power_ctrl_byte, 1);
265 		if (ret != 1)
266 			return -EIO;
267 
268 		if (cable_power_class > QSFP_POWER_CLASS_4) {
269 			power_ctrl_byte |= (1 << 2);
270 			ret = qsfp_write(ppd, ppd->dd->hfi1_id,
271 					 QSFP_PWR_CTRL_BYTE_OFFS,
272 					 &power_ctrl_byte, 1);
273 			if (ret != 1)
274 				return -EIO;
275 		}
276 
277 		/* SFF 8679 rev 1.7 LPMode Deassert time */
278 		msleep(300);
279 	}
280 	return 0;
281 }
282 
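/*
 * CDR (clock and data recovery) handling.  The module's CDR control byte
 * uses the low nibble for the four RX lanes and the high nibble for the
 * four TX lanes.  apply_rx_cdr() and apply_tx_cdr() each compute their
 * nibble, honouring the *_CDR_APPLY platform switches and the module's
 * CDR-present/bypass capability bits, and apply_cdr_settings() writes the
 * combined byte back to the module in a single qsfp_write().
 */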
283 static void apply_rx_cdr(struct hfi1_pportdata *ppd,
284 			 u32 rx_preset_index,
285 			 u8 *cdr_ctrl_byte)
286 {
287 	u32 rx_preset;
288 	u8 *cache = ppd->qsfp_info.cache;
289 	int cable_power_class;
290 
291 	if (!((cache[QSFP_MOD_PWR_OFFS] & 0x4) &&
292 	      (cache[QSFP_CDR_INFO_OFFS] & 0x40)))
293 		return;
294 
295 	/* RX CDR present, bypass supported */
296 	cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);
297 
298 	if (cable_power_class <= QSFP_POWER_CLASS_3) {
299 		/* Power class <= 3, ignore config & turn RX CDR on */
300 		*cdr_ctrl_byte |= 0xF;
301 		return;
302 	}
303 
304 	get_platform_config_field(
305 		ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
306 		rx_preset_index, RX_PRESET_TABLE_QSFP_RX_CDR_APPLY,
307 		&rx_preset, 4);
308 
309 	if (!rx_preset) {
310 		dd_dev_info(
311 			ppd->dd,
312 			"%s: RX_CDR_APPLY is set to disabled\n",
313 			__func__);
314 		return;
315 	}
316 	get_platform_config_field(
317 		ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
318 		rx_preset_index, RX_PRESET_TABLE_QSFP_RX_CDR,
319 		&rx_preset, 4);
320 
321 	/* Expand cdr setting to all 4 lanes */
322 	rx_preset = (rx_preset | (rx_preset << 1) |
323 			(rx_preset << 2) | (rx_preset << 3));
324 
325 	if (rx_preset) {
326 		*cdr_ctrl_byte |= rx_preset;
327 	} else {
328 		*cdr_ctrl_byte &= rx_preset;
329 		/* Preserve current TX CDR status */
330 		*cdr_ctrl_byte |= (cache[QSFP_CDR_CTRL_BYTE_OFFS] & 0xF0);
331 	}
332 }
333 
334 static void apply_tx_cdr(struct hfi1_pportdata *ppd,
335 			 u32 tx_preset_index,
336 			 u8 *cdr_ctrl_byte)
337 {
338 	u32 tx_preset;
339 	u8 *cache = ppd->qsfp_info.cache;
340 	int cable_power_class;
341 
342 	if (!((cache[QSFP_MOD_PWR_OFFS] & 0x8) &&
343 	      (cache[QSFP_CDR_INFO_OFFS] & 0x80)))
344 		return;
345 
346 	/* TX CDR present, bypass supported */
347 	cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);
348 
349 	if (cable_power_class <= QSFP_POWER_CLASS_3) {
350 		/* Power class <= 3, ignore config & turn TX CDR on */
351 		*cdr_ctrl_byte |= 0xF0;
352 		return;
353 	}
354 
355 	get_platform_config_field(
356 		ppd->dd,
357 		PLATFORM_CONFIG_TX_PRESET_TABLE, tx_preset_index,
358 		TX_PRESET_TABLE_QSFP_TX_CDR_APPLY, &tx_preset, 4);
359 
360 	if (!tx_preset) {
361 		dd_dev_info(
362 			ppd->dd,
363 			"%s: TX_CDR_APPLY is set to disabled\n",
364 			__func__);
365 		return;
366 	}
367 	get_platform_config_field(
368 		ppd->dd,
369 		PLATFORM_CONFIG_TX_PRESET_TABLE,
370 		tx_preset_index,
371 		TX_PRESET_TABLE_QSFP_TX_CDR, &tx_preset, 4);
372 
373 	/* Expand cdr setting to all 4 lanes */
374 	tx_preset = (tx_preset | (tx_preset << 1) |
375 			(tx_preset << 2) | (tx_preset << 3));
376 
377 	if (tx_preset)
378 		*cdr_ctrl_byte |= (tx_preset << 4);
379 	else
380 		/* Preserve current/determined RX CDR status */
381 		*cdr_ctrl_byte &= ((tx_preset << 4) | 0xF);
382 }
383 
384 static void apply_cdr_settings(
385 		struct hfi1_pportdata *ppd, u32 rx_preset_index,
386 		u32 tx_preset_index)
387 {
388 	u8 *cache = ppd->qsfp_info.cache;
389 	u8 cdr_ctrl_byte = cache[QSFP_CDR_CTRL_BYTE_OFFS];
390 
391 	apply_rx_cdr(ppd, rx_preset_index, &cdr_ctrl_byte);
392 
393 	apply_tx_cdr(ppd, tx_preset_index, &cdr_ctrl_byte);
394 
395 	qsfp_write(ppd, ppd->dd->hfi1_id, QSFP_CDR_CTRL_BYTE_OFFS,
396 		   &cdr_ctrl_byte, 1);
397 }
398 
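/*
 * Equalization programming.  The helpers below touch control bytes in the
 * module's upper page 03h, gated by the capability bits in
 * QSFP_EQ_INFO_OFFS: adaptive TX EQ is switched off if advertised, then
 * fixed TX EQ, RX emphasis and (separately, below) RX amplitude are
 * programmed from the platform preset tables and limited to what the
 * module advertises.  The (128 * 3) + N cache indices and (256 * 3) + N
 * write addresses both appear to resolve to byte N of upper page 03h
 * (cached pages at a 128-byte stride, write addresses at a 256-byte
 * stride); that mapping is inferred from the parallel offsets rather than
 * stated in this file.
 */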
399 static void apply_tx_eq_auto(struct hfi1_pportdata *ppd)
400 {
401 	u8 *cache = ppd->qsfp_info.cache;
402 	u8 tx_eq;
403 
404 	if (!(cache[QSFP_EQ_INFO_OFFS] & 0x8))
405 		return;
406 	/* Disable adaptive TX EQ if present */
407 	tx_eq = cache[(128 * 3) + 241];
408 	tx_eq &= 0xF0;
409 	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 241, &tx_eq, 1);
410 }
411 
412 static void apply_tx_eq_prog(struct hfi1_pportdata *ppd, u32 tx_preset_index)
413 {
414 	u8 *cache = ppd->qsfp_info.cache;
415 	u32 tx_preset;
416 	u8 tx_eq;
417 
418 	if (!(cache[QSFP_EQ_INFO_OFFS] & 0x4))
419 		return;
420 
421 	get_platform_config_field(
422 		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
423 		tx_preset_index, TX_PRESET_TABLE_QSFP_TX_EQ_APPLY,
424 		&tx_preset, 4);
425 	if (!tx_preset) {
426 		dd_dev_info(
427 			ppd->dd,
428 			"%s: TX_EQ_APPLY is set to disabled\n",
429 			__func__);
430 		return;
431 	}
432 	get_platform_config_field(
433 			ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
434 			tx_preset_index, TX_PRESET_TABLE_QSFP_TX_EQ,
435 			&tx_preset, 4);
436 
437 	if (((cache[(128 * 3) + 224] & 0xF0) >> 4) < tx_preset) {
438 		dd_dev_info(
439 			ppd->dd,
440 			"%s: TX EQ %x unsupported\n",
441 			__func__, tx_preset);
442 
443 		dd_dev_info(
444 			ppd->dd,
445 			"%s: Applying EQ %x\n",
446 			__func__, cache[608] & 0xF0);
447 
448 		tx_preset = (cache[608] & 0xF0) >> 4;
449 	}
450 
451 	tx_eq = tx_preset | (tx_preset << 4);
452 	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 234, &tx_eq, 1);
453 	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 235, &tx_eq, 1);
454 }
455 
456 static void apply_rx_eq_emp(struct hfi1_pportdata *ppd, u32 rx_preset_index)
457 {
458 	u32 rx_preset;
459 	u8 rx_eq, *cache = ppd->qsfp_info.cache;
460 
461 	if (!(cache[QSFP_EQ_INFO_OFFS] & 0x2))
462 		return;
463 	get_platform_config_field(
464 			ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
465 			rx_preset_index, RX_PRESET_TABLE_QSFP_RX_EMP_APPLY,
466 			&rx_preset, 4);
467 
468 	if (!rx_preset) {
469 		dd_dev_info(
470 			ppd->dd,
471 			"%s: RX_EMP_APPLY is set to disabled\n",
472 			__func__);
473 		return;
474 	}
475 	get_platform_config_field(
476 		ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
477 		rx_preset_index, RX_PRESET_TABLE_QSFP_RX_EMP,
478 		&rx_preset, 4);
479 
480 	if ((cache[(128 * 3) + 224] & 0xF) < rx_preset) {
481 		dd_dev_info(
482 			ppd->dd,
483 			"%s: Requested RX EMP %x\n",
484 			__func__, rx_preset);
485 
486 		dd_dev_info(
487 			ppd->dd,
488 			"%s: Applying supported EMP %x\n",
489 			__func__, cache[608] & 0xF);
490 
491 		rx_preset = cache[608] & 0xF;
492 	}
493 
494 	rx_eq = rx_preset | (rx_preset << 4);
495 
496 	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 236, &rx_eq, 1);
497 	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 237, &rx_eq, 1);
498 }
499 
500 static void apply_eq_settings(struct hfi1_pportdata *ppd,
501 			      u32 rx_preset_index, u32 tx_preset_index)
502 {
503 	u8 *cache = ppd->qsfp_info.cache;
504 
505 	/* no point going on w/o a page 3 */
506 	if (cache[2] & 4) {
507 		dd_dev_info(ppd->dd,
508 			    "%s: Upper page 03 not present\n",
509 			    __func__);
510 		return;
511 	}
512 
513 	apply_tx_eq_auto(ppd);
514 
515 	apply_tx_eq_prog(ppd, tx_preset_index);
516 
517 	apply_rx_eq_emp(ppd, rx_preset_index);
518 }
519 
520 static void apply_rx_amplitude_settings(
521 		struct hfi1_pportdata *ppd, u32 rx_preset_index,
522 		u32 tx_preset_index)
523 {
524 	u32 rx_preset;
525 	u8 rx_amp = 0, i = 0, preferred = 0, *cache = ppd->qsfp_info.cache;
526 
527 	/* no point going on w/o a page 3 */
528 	if (cache[2] & 4) {
529 		dd_dev_info(ppd->dd,
530 			    "%s: Upper page 03 not present\n",
531 			    __func__);
532 		return;
533 	}
534 	if (!(cache[QSFP_EQ_INFO_OFFS] & 0x1)) {
535 		dd_dev_info(ppd->dd,
536 			    "%s: RX_AMP_APPLY is set to disabled\n",
537 			    __func__);
538 		return;
539 	}
540 
541 	get_platform_config_field(ppd->dd,
542 				  PLATFORM_CONFIG_RX_PRESET_TABLE,
543 				  rx_preset_index,
544 				  RX_PRESET_TABLE_QSFP_RX_AMP_APPLY,
545 				  &rx_preset, 4);
546 
547 	if (!rx_preset) {
548 		dd_dev_info(ppd->dd,
549 			    "%s: RX_AMP_APPLY is set to disabled\n",
550 			    __func__);
551 		return;
552 	}
553 	get_platform_config_field(ppd->dd,
554 				  PLATFORM_CONFIG_RX_PRESET_TABLE,
555 				  rx_preset_index,
556 				  RX_PRESET_TABLE_QSFP_RX_AMP,
557 				  &rx_preset, 4);
558 
559 	dd_dev_info(ppd->dd,
560 		    "%s: Requested RX AMP %x\n",
561 		    __func__,
562 		    rx_preset);
563 
564 	for (i = 0; i < 4; i++) {
565 		if (cache[(128 * 3) + 225] & (1 << i)) {
566 			preferred = i;
567 			if (preferred == rx_preset)
568 				break;
569 		}
570 	}
571 
572 	/*
573 	 * Verify that preferred RX amplitude is not just a
574 	 * fall through of the default
575 	 */
576 	if (!preferred && !(cache[(128 * 3) + 225] & 0x1)) {
577 		dd_dev_info(ppd->dd, "No supported RX AMP, not applying\n");
578 		return;
579 	}
580 
581 	dd_dev_info(ppd->dd,
582 		    "%s: Applying RX AMP %x\n", __func__, preferred);
583 
584 	rx_amp = preferred | (preferred << 4);
585 	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 238, &rx_amp, 1);
586 	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 239, &rx_amp, 1);
587 }
588 
589 #define OPA_INVALID_INDEX 0xFFF
590 
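/*
 * Helper used by apply_tunings(): push the same 32-bit configuration word
 * to the 8051 firmware for each of the four lanes, logging (but not
 * propagating) any per-lane failure.
 */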
591 static void apply_tx_lanes(struct hfi1_pportdata *ppd, u8 field_id,
592 			   u32 config_data, const char *message)
593 {
594 	u8 i;
595 	int ret;
596 
597 	for (i = 0; i < 4; i++) {
598 		ret = load_8051_config(ppd->dd, field_id, i, config_data);
599 		if (ret != HCMD_SUCCESS) {
600 			dd_dev_err(
601 				ppd->dd,
602 				"%s: %s for lane %u failed\n",
603 				message, __func__, i);
604 		}
605 	}
606 }
607 
608 /*
609  * Return a special SerDes setting for low power AOC cables.  The power class
610  * threshold and setting being used were all found by empirical testing.
611  *
612  * Summary of the logic:
613  *
614  * if (QSFP and QSFP_TYPE == AOC and QSFP_POWER_CLASS < 4)
615  *     return 0xe
616  * return 0; // leave at default
617  */
618 static u8 aoc_low_power_setting(struct hfi1_pportdata *ppd)
619 {
620 	u8 *cache = ppd->qsfp_info.cache;
621 	int power_class;
622 
623 	/* QSFP only */
624 	if (ppd->port_type != PORT_TYPE_QSFP)
625 		return 0; /* leave at default */
626 
627 	/* active optical cables only */
628 	switch ((cache[QSFP_MOD_TECH_OFFS] & 0xF0) >> 4) {
629 	case 0x0 ... 0x9: fallthrough;
630 	case 0xC: fallthrough;
631 	case 0xE:
632 		/* active AOC */
633 		power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);
634 		if (power_class < QSFP_POWER_CLASS_4)
635 			return 0xe;
636 	}
637 	return 0; /* leave at default */
638 }
639 
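/*
 * Hand the tuning results to the 8051 link firmware: the tuning method,
 * the channel loss (total_atten packed into both the TX and RX bytes of
 * CHANNEL_LOSS_SETTINGS), the external device capability bits gathered
 * from the QSFP cache, and, for limiting active channels with a valid TX
 * preset, the per-lane TX equalization word (precursor, attenuation,
 * postcursor and the AOC low power nibble).
 */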
640 static void apply_tunings(
641 		struct hfi1_pportdata *ppd, u32 tx_preset_index,
642 		u8 tuning_method, u32 total_atten, u8 limiting_active)
643 {
644 	int ret = 0;
645 	u32 config_data = 0, tx_preset = 0;
646 	u8 precur = 0, attn = 0, postcur = 0, external_device_config = 0;
647 	u8 *cache = ppd->qsfp_info.cache;
648 
649 	/* Pass tuning method to 8051 */
650 	read_8051_config(ppd->dd, LINK_TUNING_PARAMETERS, GENERAL_CONFIG,
651 			 &config_data);
652 	config_data &= ~(0xff << TUNING_METHOD_SHIFT);
653 	config_data |= ((u32)tuning_method << TUNING_METHOD_SHIFT);
654 	ret = load_8051_config(ppd->dd, LINK_TUNING_PARAMETERS, GENERAL_CONFIG,
655 			       config_data);
656 	if (ret != HCMD_SUCCESS)
657 		dd_dev_err(ppd->dd, "%s: Failed to set tuning method\n",
658 			   __func__);
659 
660 	/* Set same channel loss for both TX and RX */
661 	config_data = 0 | (total_atten << 16) | (total_atten << 24);
662 	apply_tx_lanes(ppd, CHANNEL_LOSS_SETTINGS, config_data,
663 		       "Setting channel loss");
664 
665 	/* Inform 8051 of cable capabilities */
666 	if (ppd->qsfp_info.cache_valid) {
667 		external_device_config =
668 			((cache[QSFP_MOD_PWR_OFFS] & 0x4) << 3) |
669 			((cache[QSFP_MOD_PWR_OFFS] & 0x8) << 2) |
670 			((cache[QSFP_EQ_INFO_OFFS] & 0x2) << 1) |
671 			(cache[QSFP_EQ_INFO_OFFS] & 0x4);
672 		ret = read_8051_config(ppd->dd, DC_HOST_COMM_SETTINGS,
673 				       GENERAL_CONFIG, &config_data);
674 		/* Clear, then set the external device config field */
675 		config_data &= ~(u32)0xFF;
676 		config_data |= external_device_config;
677 		ret = load_8051_config(ppd->dd, DC_HOST_COMM_SETTINGS,
678 				       GENERAL_CONFIG, config_data);
679 		if (ret != HCMD_SUCCESS)
680 			dd_dev_err(ppd->dd,
681 				   "%s: Failed set ext device config params\n",
682 				   __func__);
683 	}
684 
685 	if (tx_preset_index == OPA_INVALID_INDEX) {
686 		if (ppd->port_type == PORT_TYPE_QSFP && limiting_active)
687 			dd_dev_err(ppd->dd, "%s: Invalid Tx preset index\n",
688 				   __func__);
689 		return;
690 	}
691 
692 	/* The following applies to limiting active channels only */
693 	get_platform_config_field(
694 		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE, tx_preset_index,
695 		TX_PRESET_TABLE_PRECUR, &tx_preset, 4);
696 	precur = tx_preset;
697 
698 	get_platform_config_field(
699 		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
700 		tx_preset_index, TX_PRESET_TABLE_ATTN, &tx_preset, 4);
701 	attn = tx_preset;
702 
703 	get_platform_config_field(
704 		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
705 		tx_preset_index, TX_PRESET_TABLE_POSTCUR, &tx_preset, 4);
706 	postcur = tx_preset;
707 
708 	/*
709 	 * NOTES:
710 	 * o The aoc_low_power_setting is applied to all lanes even
711 	 *   though only lane 0's value is examined by the firmware.
712 	 * o A lingering low power setting after a cable swap does
713 	 *   not occur.  On cable unplug the 8051 is reset and
714 	 *   restarted on cable insert.  This resets all settings to
715 	 *   their default, erasing any previous low power setting.
716 	 */
717 	config_data = precur | (attn << 8) | (postcur << 16) |
718 			(aoc_low_power_setting(ppd) << 24);
719 
720 	apply_tx_lanes(ppd, TX_EQ_SETTINGS, config_data,
721 		       "Applying TX settings");
722 }
723 
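/*
 * Tune an active (optical or active copper) module.  The sequence is:
 * disable TX, qualify the module's power class and bit rate, reset the
 * module if it was already tuned once (to drop stale settings), raise it
 * to high power if needed, look up the TX/RX preset indices and local
 * attenuation from the port table, program CDR, EQ and RX amplitude, and
 * finally re-enable TX.
 */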
724 /* Must be holding the QSFP i2c resource */
725 static int tune_active_qsfp(struct hfi1_pportdata *ppd, u32 *ptr_tx_preset,
726 			    u32 *ptr_rx_preset, u32 *ptr_total_atten)
727 {
728 	int ret;
729 	u16 lss = ppd->link_speed_supported, lse = ppd->link_speed_enabled;
730 	u8 *cache = ppd->qsfp_info.cache;
731 
732 	ppd->qsfp_info.limiting_active = 1;
733 
734 	ret = set_qsfp_tx(ppd, 0);
735 	if (ret)
736 		return ret;
737 
738 	ret = qual_power(ppd);
739 	if (ret)
740 		return ret;
741 
742 	ret = qual_bitrate(ppd);
743 	if (ret)
744 		return ret;
745 
746 	/*
747 	 * We'll change the QSFP memory contents from here on out, thus we set a
748 	 * flag here to remind ourselves to reset the QSFP module. This prevents
749 	 * reuse of stale settings established in our previous pass through.
750 	 */
751 	if (ppd->qsfp_info.reset_needed) {
752 		ret = reset_qsfp(ppd);
753 		if (ret)
754 			return ret;
755 		refresh_qsfp_cache(ppd, &ppd->qsfp_info);
756 	} else {
757 		ppd->qsfp_info.reset_needed = 1;
758 	}
759 
760 	ret = set_qsfp_high_power(ppd);
761 	if (ret)
762 		return ret;
763 
764 	if (cache[QSFP_EQ_INFO_OFFS] & 0x4) {
765 		ret = get_platform_config_field(
766 			ppd->dd,
767 			PLATFORM_CONFIG_PORT_TABLE, 0,
768 			PORT_TABLE_TX_PRESET_IDX_ACTIVE_EQ,
769 			ptr_tx_preset, 4);
770 		if (ret) {
771 			*ptr_tx_preset = OPA_INVALID_INDEX;
772 			return ret;
773 		}
774 	} else {
775 		ret = get_platform_config_field(
776 			ppd->dd,
777 			PLATFORM_CONFIG_PORT_TABLE, 0,
778 			PORT_TABLE_TX_PRESET_IDX_ACTIVE_NO_EQ,
779 			ptr_tx_preset, 4);
780 		if (ret) {
781 			*ptr_tx_preset = OPA_INVALID_INDEX;
782 			return ret;
783 		}
784 	}
785 
786 	ret = get_platform_config_field(
787 		ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
788 		PORT_TABLE_RX_PRESET_IDX, ptr_rx_preset, 4);
789 	if (ret) {
790 		*ptr_rx_preset = OPA_INVALID_INDEX;
791 		return ret;
792 	}
793 
794 	if ((lss & OPA_LINK_SPEED_25G) && (lse & OPA_LINK_SPEED_25G))
795 		get_platform_config_field(
796 			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
797 			PORT_TABLE_LOCAL_ATTEN_25G, ptr_total_atten, 4);
798 	else if ((lss & OPA_LINK_SPEED_12_5G) && (lse & OPA_LINK_SPEED_12_5G))
799 		get_platform_config_field(
800 			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
801 			PORT_TABLE_LOCAL_ATTEN_12G, ptr_total_atten, 4);
802 
803 	apply_cdr_settings(ppd, *ptr_rx_preset, *ptr_tx_preset);
804 
805 	apply_eq_settings(ppd, *ptr_rx_preset, *ptr_tx_preset);
806 
807 	apply_rx_amplitude_settings(ppd, *ptr_rx_preset, *ptr_tx_preset);
808 
809 	ret = set_qsfp_tx(ppd, 1);
810 
811 	return ret;
812 }
813 
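/*
 * Dispatch on the module technology nibble (upper half of
 * QSFP_MOD_TECH_OFFS): 0xA-0xB are treated as passive copper, tuned from
 * platform, cable and remote attenuation values; 0x0-0x9, 0xC and 0xE are
 * treated as active (optical or active copper) and handed to
 * tune_active_qsfp(); anything else is reported as unsupported.
 */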
814 static int tune_qsfp(struct hfi1_pportdata *ppd,
815 		     u32 *ptr_tx_preset, u32 *ptr_rx_preset,
816 		     u8 *ptr_tuning_method, u32 *ptr_total_atten)
817 {
818 	u32 cable_atten = 0, remote_atten = 0, platform_atten = 0;
819 	u16 lss = ppd->link_speed_supported, lse = ppd->link_speed_enabled;
820 	int ret = 0;
821 	u8 *cache = ppd->qsfp_info.cache;
822 
823 	switch ((cache[QSFP_MOD_TECH_OFFS] & 0xF0) >> 4) {
824 	case 0xA ... 0xB:
825 		ret = get_platform_config_field(
826 			ppd->dd,
827 			PLATFORM_CONFIG_PORT_TABLE, 0,
828 			PORT_TABLE_LOCAL_ATTEN_25G,
829 			&platform_atten, 4);
830 		if (ret)
831 			return ret;
832 
833 		if ((lss & OPA_LINK_SPEED_25G) && (lse & OPA_LINK_SPEED_25G))
834 			cable_atten = cache[QSFP_CU_ATTEN_12G_OFFS];
835 		else if ((lss & OPA_LINK_SPEED_12_5G) &&
836 			 (lse & OPA_LINK_SPEED_12_5G))
837 			cable_atten = cache[QSFP_CU_ATTEN_7G_OFFS];
838 
839 		/* Fallback to configured attenuation if cable memory is bad */
840 		if (cable_atten == 0 || cable_atten > 36) {
841 			ret = get_platform_config_field(
842 				ppd->dd,
843 				PLATFORM_CONFIG_SYSTEM_TABLE, 0,
844 				SYSTEM_TABLE_QSFP_ATTENUATION_DEFAULT_25G,
845 				&cable_atten, 4);
846 			if (ret)
847 				return ret;
848 		}
849 
850 		ret = get_platform_config_field(
851 			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
852 			PORT_TABLE_REMOTE_ATTEN_25G, &remote_atten, 4);
853 		if (ret)
854 			return ret;
855 
856 		*ptr_total_atten = platform_atten + cable_atten + remote_atten;
857 
858 		*ptr_tuning_method = OPA_PASSIVE_TUNING;
859 		break;
860 	case 0x0 ... 0x9: fallthrough;
861 	case 0xC: fallthrough;
862 	case 0xE:
863 		ret = tune_active_qsfp(ppd, ptr_tx_preset, ptr_rx_preset,
864 				       ptr_total_atten);
865 		if (ret)
866 			return ret;
867 
868 		*ptr_tuning_method = OPA_ACTIVE_TUNING;
869 		break;
870 	case 0xD: fallthrough;
871 	case 0xF:
872 	default:
873 		dd_dev_warn(ppd->dd, "%s: Unknown/unsupported cable\n",
874 			    __func__);
875 		break;
876 	}
877 	return ret;
878 }
879 
880 /*
881  * This function communicates its success or failure via ppd->driver_link_ready
882  * Thus, it depends on its association with start_link(...) which checks
883  * driver_link_ready before proceeding with the link negotiation and
884  * initialization process.
885  */
886 void tune_serdes(struct hfi1_pportdata *ppd)
887 {
888 	int ret = 0;
889 	u32 total_atten = 0;
890 	u32 remote_atten = 0, platform_atten = 0;
891 	u32 rx_preset_index, tx_preset_index;
892 	u8 tuning_method = 0, limiting_active = 0;
893 	struct hfi1_devdata *dd = ppd->dd;
894 
895 	rx_preset_index = OPA_INVALID_INDEX;
896 	tx_preset_index = OPA_INVALID_INDEX;
897 
898 	/* the link defaults to enabled */
899 	ppd->link_enabled = 1;
900 	/* the driver link ready state defaults to not ready */
901 	ppd->driver_link_ready = 0;
902 	ppd->offline_disabled_reason = HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
903 
904 	/* Skip the tuning for testing (loopback != none) and simulations */
905 	if (loopback != LOOPBACK_NONE ||
906 	    ppd->dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
907 		ppd->driver_link_ready = 1;
908 
909 		if (qsfp_mod_present(ppd)) {
910 			ret = acquire_chip_resource(ppd->dd,
911 						    qsfp_resource(ppd->dd),
912 						    QSFP_WAIT);
913 			if (ret) {
914 				dd_dev_err(ppd->dd, "%s: hfi%d: cannot lock i2c chain\n",
915 					   __func__, (int)ppd->dd->hfi1_id);
916 				goto bail;
917 			}
918 
919 			refresh_qsfp_cache(ppd, &ppd->qsfp_info);
920 			release_chip_resource(ppd->dd, qsfp_resource(ppd->dd));
921 		}
922 
923 		return;
924 	}
925 
926 	switch (ppd->port_type) {
927 	case PORT_TYPE_DISCONNECTED:
928 		ppd->offline_disabled_reason =
929 			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_DISCONNECTED);
930 		dd_dev_warn(dd, "%s: Port disconnected, disabling port\n",
931 			    __func__);
932 		goto bail;
933 	case PORT_TYPE_FIXED:
934 		/* platform_atten, remote_atten pre-zeroed to catch error */
935 		get_platform_config_field(
936 			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
937 			PORT_TABLE_LOCAL_ATTEN_25G, &platform_atten, 4);
938 
939 		get_platform_config_field(
940 			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
941 			PORT_TABLE_REMOTE_ATTEN_25G, &remote_atten, 4);
942 
943 		total_atten = platform_atten + remote_atten;
944 
945 		tuning_method = OPA_PASSIVE_TUNING;
946 		break;
947 	case PORT_TYPE_VARIABLE:
948 		if (qsfp_mod_present(ppd)) {
949 			/*
950 			 * platform_atten, remote_atten pre-zeroed to
951 			 * catch error
952 			 */
953 			get_platform_config_field(
954 				ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
955 				PORT_TABLE_LOCAL_ATTEN_25G,
956 				&platform_atten, 4);
957 
958 			get_platform_config_field(
959 				ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
960 				PORT_TABLE_REMOTE_ATTEN_25G,
961 				&remote_atten, 4);
962 
963 			total_atten = platform_atten + remote_atten;
964 
965 			tuning_method = OPA_PASSIVE_TUNING;
966 		} else {
967 			ppd->offline_disabled_reason =
968 			     HFI1_ODR_MASK(OPA_LINKDOWN_REASON_CHASSIS_CONFIG);
969 			goto bail;
970 		}
971 		break;
972 	case PORT_TYPE_QSFP:
973 		if (qsfp_mod_present(ppd)) {
974 			ret = acquire_chip_resource(ppd->dd,
975 						    qsfp_resource(ppd->dd),
976 						    QSFP_WAIT);
977 			if (ret) {
978 				dd_dev_err(ppd->dd, "%s: hfi%d: cannot lock i2c chain\n",
979 					   __func__, (int)ppd->dd->hfi1_id);
980 				goto bail;
981 			}
982 			refresh_qsfp_cache(ppd, &ppd->qsfp_info);
983 
984 			if (ppd->qsfp_info.cache_valid) {
985 				ret = tune_qsfp(ppd,
986 						&tx_preset_index,
987 						&rx_preset_index,
988 						&tuning_method,
989 						&total_atten);
990 
991 				/*
992 				 * We may have modified the QSFP memory, so
993 				 * update the cache to reflect the changes
994 				 */
995 				refresh_qsfp_cache(ppd, &ppd->qsfp_info);
996 				limiting_active =
997 						ppd->qsfp_info.limiting_active;
998 			} else {
999 				dd_dev_err(dd,
1000 					   "%s: Reading QSFP memory failed\n",
1001 					   __func__);
1002 				ret = -EINVAL; /* a fail indication */
1003 			}
1004 			release_chip_resource(ppd->dd, qsfp_resource(ppd->dd));
1005 			if (ret)
1006 				goto bail;
1007 		} else {
1008 			ppd->offline_disabled_reason =
1009 			   HFI1_ODR_MASK(
1010 				OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
1011 			goto bail;
1012 		}
1013 		break;
1014 	default:
1015 		dd_dev_warn(ppd->dd, "%s: Unknown port type\n", __func__);
1016 		ppd->port_type = PORT_TYPE_UNKNOWN;
1017 		tuning_method = OPA_UNKNOWN_TUNING;
1018 		total_atten = 0;
1019 		limiting_active = 0;
1020 		tx_preset_index = OPA_INVALID_INDEX;
1021 		break;
1022 	}
1023 
1024 	if (ppd->offline_disabled_reason ==
1025 			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
1026 		apply_tunings(ppd, tx_preset_index, tuning_method,
1027 			      total_atten, limiting_active);
1028 
1029 	if (!ret)
1030 		ppd->driver_link_ready = 1;
1031 
1032 	return;
1033 bail:
1034 	ppd->driver_link_ready = 0;
1035 }
1036