xref: /linux/drivers/gpu/drm/i915/display/intel_dp_link_training.c (revision 07fdad3a93756b872da7b53647715c48d0f4a2d0)
1 /*
2  * Copyright © 2008-2015 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23 
24 #include <linux/debugfs.h>
25 #include <linux/iopoll.h>
26 
27 #include <drm/display/drm_dp_helper.h>
28 #include <drm/drm_print.h>
29 
30 #include "i915_utils.h"
31 #include "intel_display_core.h"
32 #include "intel_display_types.h"
33 #include "intel_dp.h"
34 #include "intel_dp_link_training.h"
35 #include "intel_encoder.h"
36 #include "intel_hotplug.h"
37 #include "intel_panel.h"
38 
/*
 * Log message helpers identifying the connector, the encoder and the DP PHY
 * (DPRX or an LTTPR) a link training message applies to.
 */
#define LT_MSG_PREFIX			"[CONNECTOR:%d:%s][ENCODER:%d:%s][%s] "
#define LT_MSG_ARGS(_intel_dp, _dp_phy)	(_intel_dp)->attached_connector->base.base.id, \
					(_intel_dp)->attached_connector->base.name, \
					dp_to_dig_port(_intel_dp)->base.base.base.id, \
					dp_to_dig_port(_intel_dp)->base.base.name, \
					drm_dp_phy_name(_dp_phy)

/* Debug level link training message. */
#define lt_dbg(_intel_dp, _dp_phy, _format, ...) \
	drm_dbg_kms(to_intel_display(_intel_dp)->drm, \
		    LT_MSG_PREFIX _format, \
		    LT_MSG_ARGS(_intel_dp, _dp_phy), ## __VA_ARGS__)

/*
 * Error level link training message, demoted to debug level when the port is
 * no longer connected: training failures are expected while the sink is being
 * unplugged and shouldn't spam the kernel log as errors.
 */
#define lt_err(_intel_dp, _dp_phy, _format, ...) do { \
	if (intel_digital_port_connected(&dp_to_dig_port(_intel_dp)->base)) \
		drm_err(to_intel_display(_intel_dp)->drm, \
			LT_MSG_PREFIX _format, \
			LT_MSG_ARGS(_intel_dp, _dp_phy), ## __VA_ARGS__); \
	else \
		lt_dbg(_intel_dp, _dp_phy, "Sink disconnected: " _format, ## __VA_ARGS__); \
} while (0)

/* Max consecutive LT failures - NOTE(review): used outside this chunk; confirm semantics at the use site. */
#define MAX_SEQ_TRAIN_FAILURES 2
61 
/* Invalidate the cached LTTPR common capabilities. */
static void intel_dp_reset_lttpr_common_caps(struct intel_dp *intel_dp)
{
	memset(intel_dp->lttpr_common_caps, 0, sizeof(intel_dp->lttpr_common_caps));
}
66 
/*
 * Zero only the cached DP_PHY_REPEATER_CNT byte in the common caps, making
 * subsequent drm_dp_lttpr_count() queries report no LTTPRs.
 */
static void intel_dp_reset_lttpr_count(struct intel_dp *intel_dp)
{
	intel_dp->lttpr_common_caps[DP_PHY_REPEATER_CNT -
				    DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV] = 0;
}
72 
/*
 * Return the cached capability buffer for the given LTTPR PHY.
 * @dp_phy must be DP_PHY_LTTPR1 or higher, not DP_PHY_DPRX.
 */
static u8 *intel_dp_lttpr_phy_caps(struct intel_dp *intel_dp,
				   enum drm_dp_phy dp_phy)
{
	return intel_dp->lttpr_phy_caps[dp_phy - DP_PHY_LTTPR1];
}
78 
79 static void intel_dp_read_lttpr_phy_caps(struct intel_dp *intel_dp,
80 					 const u8 dpcd[DP_RECEIVER_CAP_SIZE],
81 					 enum drm_dp_phy dp_phy)
82 {
83 	u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);
84 
85 	if (drm_dp_read_lttpr_phy_caps(&intel_dp->aux, dpcd, dp_phy, phy_caps) < 0) {
86 		lt_dbg(intel_dp, dp_phy, "failed to read the PHY caps\n");
87 		return;
88 	}
89 
90 	lt_dbg(intel_dp, dp_phy, "PHY capabilities: %*ph\n",
91 	       (int)sizeof(intel_dp->lttpr_phy_caps[0]),
92 	       phy_caps);
93 }
94 
95 static bool intel_dp_read_lttpr_common_caps(struct intel_dp *intel_dp,
96 					    const u8 dpcd[DP_RECEIVER_CAP_SIZE])
97 {
98 	int ret;
99 
100 	ret = drm_dp_read_lttpr_common_caps(&intel_dp->aux, dpcd,
101 					    intel_dp->lttpr_common_caps);
102 	if (ret < 0)
103 		goto reset_caps;
104 
105 	lt_dbg(intel_dp, DP_PHY_DPRX, "LTTPR common capabilities: %*ph\n",
106 	       (int)sizeof(intel_dp->lttpr_common_caps),
107 	       intel_dp->lttpr_common_caps);
108 
109 	/* The minimum value of LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV is 1.4 */
110 	if (intel_dp->lttpr_common_caps[0] < 0x14)
111 		goto reset_caps;
112 
113 	return true;
114 
115 reset_caps:
116 	intel_dp_reset_lttpr_common_caps(intel_dp);
117 	return false;
118 }
119 
120 static bool
121 intel_dp_set_lttpr_transparent_mode(struct intel_dp *intel_dp, bool enable)
122 {
123 	u8 val = enable ? DP_PHY_REPEATER_MODE_TRANSPARENT :
124 			  DP_PHY_REPEATER_MODE_NON_TRANSPARENT;
125 
126 	intel_dp->lttpr_common_caps[DP_PHY_REPEATER_MODE -
127 				    DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV] = val;
128 
129 	return true;
130 }
131 
132 bool intel_dp_lttpr_transparent_mode_enabled(struct intel_dp *intel_dp)
133 {
134 	return intel_dp->lttpr_common_caps[DP_PHY_REPEATER_MODE -
135 					   DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV] ==
136 		DP_PHY_REPEATER_MODE_TRANSPARENT;
137 }
138 
/*
 * Read the LTTPR common capabilities and switch the LTTPR PHYs to
 * non-transparent mode if this is supported. Preserve the
 * transparent/non-transparent mode on an active link.
 *
 * Return the number of detected LTTPRs in non-transparent mode or 0 if the
 * LTTPRs are in transparent mode or the detection failed.
 */
static int intel_dp_init_lttpr_phys(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
	int lttpr_count;
	int ret;

	if (!intel_dp_read_lttpr_common_caps(intel_dp, dpcd))
		return 0;

	/* A negative count means the reported repeater count is invalid. */
	lttpr_count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps);
	/*
	 * Prevent setting LTTPR transparent mode explicitly if no LTTPRs are
	 * detected as this breaks link training at least on the Dell WD19TB
	 * dock.
	 */
	if (lttpr_count == 0)
		return 0;

	/*
	 * Don't change the mode on an active link, to prevent a loss of link
	 * synchronization. See DP Standard v2.0 3.6.7. about the LTTPR
	 * resetting its internal state when the mode is changed from
	 * non-transparent to transparent.
	 */
	if (intel_dp->link.active) {
		/*
		 * An invalid count or an already-transparent mode means the
		 * link keeps training without LTTPR awareness.
		 */
		if (lttpr_count < 0 || intel_dp_lttpr_transparent_mode_enabled(intel_dp))
			goto out_reset_lttpr_count;

		return lttpr_count;
	}

	ret = drm_dp_lttpr_init(&intel_dp->aux, lttpr_count);
	if (ret) {
		lt_dbg(intel_dp, DP_PHY_DPRX,
		       "Switching to LTTPR non-transparent LT mode failed, fall-back to transparent mode\n");

		/* Keep the cached mode consistent with the fallback. */
		intel_dp_set_lttpr_transparent_mode(intel_dp, true);

		goto out_reset_lttpr_count;
	}

	intel_dp_set_lttpr_transparent_mode(intel_dp, false);

	return lttpr_count;

out_reset_lttpr_count:
	intel_dp_reset_lttpr_count(intel_dp);

	return 0;
}
196 
197 static int intel_dp_init_lttpr(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEIVER_CAP_SIZE])
198 {
199 	int lttpr_count;
200 	int i;
201 
202 	lttpr_count = intel_dp_init_lttpr_phys(intel_dp, dpcd);
203 
204 	for (i = 0; i < lttpr_count; i++) {
205 		intel_dp_read_lttpr_phy_caps(intel_dp, dpcd, DP_PHY_LTTPR(i));
206 		drm_dp_dump_lttpr_desc(&intel_dp->aux, DP_PHY_LTTPR(i));
207 	}
208 
209 	return lttpr_count;
210 }
211 
/*
 * Read the sink's (DPRX) receiver capabilities into @dpcd. On eDP the read
 * is skipped entirely and 0 is returned with @dpcd left unchanged. Where
 * LTTPR detection is possible, probe the LTTPR capability address first so
 * the DPCD access sequence matches the one prescribed by the DP Standard.
 *
 * Returns 0 on success, -EIO on an AUX access failure.
 */
int intel_dp_read_dprx_caps(struct intel_dp *intel_dp, u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
	struct intel_display *display = to_intel_display(intel_dp);

	if (intel_dp_is_edp(intel_dp))
		return 0;

	/*
	 * Detecting LTTPRs must be avoided on platforms with an AUX timeout
	 * period < 3.2ms. (see DP Standard v2.0, 2.11.2, 3.6.6.1).
	 */
	if (DISPLAY_VER(display) >= 10 && !display->platform.geminilake)
		if (drm_dp_dpcd_probe(&intel_dp->aux,
				      DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV))
			return -EIO;

	if (drm_dp_read_dpcd_caps(&intel_dp->aux, dpcd))
		return -EIO;

	return 0;
}
233 
/**
 * intel_dp_init_lttpr_and_dprx_caps - detect LTTPR and DPRX caps, init the LTTPR link training mode
 * @intel_dp: Intel DP struct
 *
 * Read the LTTPR common and DPRX capabilities and switch to non-transparent
 * link training mode if any is detected and read the PHY capabilities for all
 * detected LTTPRs. In case of an LTTPR detection error or if the number of
 * LTTPRs is more than is supported (8), fall back to the no-LTTPR,
 * transparent mode link training mode.
 *
 * Returns:
 *   >0  if LTTPRs were detected and the non-transparent LT mode was set. The
 *       DPRX capabilities are read out.
 *    0  if no LTTPRs or more than 8 LTTPRs were detected or in case of a
 *       detection failure and the transparent LT mode was set. The DPRX
 *       capabilities are read out.
 *   <0  Reading out the DPRX capabilities failed.
 */
int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	int lttpr_count = 0;

	/*
	 * Detecting LTTPRs must be avoided on platforms with an AUX timeout
	 * period < 3.2ms. (see DP Standard v2.0, 2.11.2, 3.6.6.1).
	 */
	if (!intel_dp_is_edp(intel_dp) &&
	    (DISPLAY_VER(display) >= 10 && !display->platform.geminilake)) {
		/* Temporary DPRX caps, only used to drive LTTPR detection. */
		u8 dpcd[DP_RECEIVER_CAP_SIZE];
		int err = intel_dp_read_dprx_caps(intel_dp, dpcd);

		if (err != 0)
			return err;

		lttpr_count = intel_dp_init_lttpr(intel_dp, dpcd);
	}

	/*
	 * The DPTX shall read the DPRX caps after LTTPR detection, so re-read
	 * it here.
	 */
	if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd)) {
		intel_dp_reset_lttpr_common_caps(intel_dp);
		return -EIO;
	}

	return lttpr_count;
}
283 
284 static u8 dp_voltage_max(u8 preemph)
285 {
286 	switch (preemph & DP_TRAIN_PRE_EMPHASIS_MASK) {
287 	case DP_TRAIN_PRE_EMPH_LEVEL_0:
288 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
289 	case DP_TRAIN_PRE_EMPH_LEVEL_1:
290 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
291 	case DP_TRAIN_PRE_EMPH_LEVEL_2:
292 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_1;
293 	case DP_TRAIN_PRE_EMPH_LEVEL_3:
294 	default:
295 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
296 	}
297 }
298 
299 static u8 intel_dp_lttpr_voltage_max(struct intel_dp *intel_dp,
300 				     enum drm_dp_phy dp_phy)
301 {
302 	const u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);
303 
304 	if (drm_dp_lttpr_voltage_swing_level_3_supported(phy_caps))
305 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
306 	else
307 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
308 }
309 
310 static u8 intel_dp_lttpr_preemph_max(struct intel_dp *intel_dp,
311 				     enum drm_dp_phy dp_phy)
312 {
313 	const u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);
314 
315 	if (drm_dp_lttpr_pre_emphasis_level_3_supported(phy_caps))
316 		return DP_TRAIN_PRE_EMPH_LEVEL_3;
317 	else
318 		return DP_TRAIN_PRE_EMPH_LEVEL_2;
319 }
320 
/*
 * Return whether @dp_phy's immediate upstream transmitter is the source
 * (DPTX) itself rather than another LTTPR: either no LTTPRs are present (in
 * which case @dp_phy should be the DPRX), or @dp_phy is
 * DP_PHY_LTTPR(lttpr_count - 1), the LTTPR adjacent to the source in drm's
 * numbering.
 */
static bool
intel_dp_phy_is_downstream_of_source(struct intel_dp *intel_dp,
				     enum drm_dp_phy dp_phy)
{
	struct intel_display *display = to_intel_display(intel_dp);
	int lttpr_count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps);

	/* Training an LTTPR PHY without any LTTPRs detected is a bug. */
	drm_WARN_ON_ONCE(display->drm,
			 lttpr_count <= 0 && dp_phy != DP_PHY_DPRX);

	return lttpr_count <= 0 || dp_phy == DP_PHY_LTTPR(lttpr_count - 1);
}
333 
/*
 * Max voltage swing the transmitter driving @dp_phy can produce, taken from
 * the source's own limit or from the upstream LTTPR's capabilities.
 */
static u8 intel_dp_phy_voltage_max(struct intel_dp *intel_dp,
				   const struct intel_crtc_state *crtc_state,
				   enum drm_dp_phy dp_phy)
{
	struct intel_display *display = to_intel_display(intel_dp);
	u8 voltage_max;

	/*
	 * Get voltage_max from the DPTX_PHY (source or LTTPR) upstream from
	 * the DPRX_PHY we train. dp_phy + 1 is the PHY one step closer to the
	 * source in drm's LTTPR numbering.
	 */
	if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy))
		voltage_max = intel_dp->voltage_max(intel_dp, crtc_state);
	else
		voltage_max = intel_dp_lttpr_voltage_max(intel_dp, dp_phy + 1);

	/* Every supported DPTX PHY must allow at least vswing level 2. */
	drm_WARN_ON_ONCE(display->drm,
			 voltage_max != DP_TRAIN_VOLTAGE_SWING_LEVEL_2 &&
			 voltage_max != DP_TRAIN_VOLTAGE_SWING_LEVEL_3);

	return voltage_max;
}
356 
/*
 * Max pre-emphasis the transmitter driving @dp_phy can produce, taken from
 * the source's own limit or from the upstream LTTPR's capabilities.
 */
static u8 intel_dp_phy_preemph_max(struct intel_dp *intel_dp,
				   enum drm_dp_phy dp_phy)
{
	struct intel_display *display = to_intel_display(intel_dp);
	u8 preemph_max;

	/*
	 * Get preemph_max from the DPTX_PHY (source or LTTPR) upstream from
	 * the DPRX_PHY we train. dp_phy + 1 is the PHY one step closer to the
	 * source in drm's LTTPR numbering.
	 */
	if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy))
		preemph_max = intel_dp->preemph_max(intel_dp);
	else
		preemph_max = intel_dp_lttpr_preemph_max(intel_dp, dp_phy + 1);

	/* Every supported DPTX PHY must allow at least pre-emphasis level 2. */
	drm_WARN_ON_ONCE(display->drm,
			 preemph_max != DP_TRAIN_PRE_EMPH_LEVEL_2 &&
			 preemph_max != DP_TRAIN_PRE_EMPH_LEVEL_3);

	return preemph_max;
}
378 
379 static bool has_per_lane_signal_levels(struct intel_dp *intel_dp,
380 				       enum drm_dp_phy dp_phy)
381 {
382 	struct intel_display *display = to_intel_display(intel_dp);
383 
384 	return !intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy) ||
385 		DISPLAY_VER(display) >= 10 || display->platform.broxton;
386 }
387 
388 /* 128b/132b */
389 static u8 intel_dp_get_lane_adjust_tx_ffe_preset(struct intel_dp *intel_dp,
390 						 const struct intel_crtc_state *crtc_state,
391 						 enum drm_dp_phy dp_phy,
392 						 const u8 link_status[DP_LINK_STATUS_SIZE],
393 						 int lane)
394 {
395 	u8 tx_ffe = 0;
396 
397 	if (has_per_lane_signal_levels(intel_dp, dp_phy)) {
398 		lane = min(lane, crtc_state->lane_count - 1);
399 		tx_ffe = drm_dp_get_adjust_tx_ffe_preset(link_status, lane);
400 	} else {
401 		for (lane = 0; lane < crtc_state->lane_count; lane++)
402 			tx_ffe = max(tx_ffe, drm_dp_get_adjust_tx_ffe_preset(link_status, lane));
403 	}
404 
405 	return tx_ffe;
406 }
407 
/*
 * 8b/10b: combined vswing/pre-emphasis drive setting to program for @lane,
 * based on the sink's adjustment requests in @link_status, clamped to what
 * the transmitting PHY supports.
 */
static u8 intel_dp_get_lane_adjust_vswing_preemph(struct intel_dp *intel_dp,
						  const struct intel_crtc_state *crtc_state,
						  enum drm_dp_phy dp_phy,
						  const u8 link_status[DP_LINK_STATUS_SIZE],
						  int lane)
{
	u8 v = 0;
	u8 p = 0;
	u8 voltage_max;
	u8 preemph_max;

	/* Per-lane request if supported, otherwise the max of all lanes. */
	if (has_per_lane_signal_levels(intel_dp, dp_phy)) {
		lane = min(lane, crtc_state->lane_count - 1);

		v = drm_dp_get_adjust_request_voltage(link_status, lane);
		p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
	} else {
		for (lane = 0; lane < crtc_state->lane_count; lane++) {
			v = max(v, drm_dp_get_adjust_request_voltage(link_status, lane));
			p = max(p, drm_dp_get_adjust_request_pre_emphasis(link_status, lane));
		}
	}

	/* Clamp pre-emphasis first, flagging that its maximum was reached. */
	preemph_max = intel_dp_phy_preemph_max(intel_dp, dp_phy);
	if (p >= preemph_max)
		p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;

	/* A higher pre-emphasis level limits the usable voltage swing. */
	v = min(v, dp_voltage_max(p));

	voltage_max = intel_dp_phy_voltage_max(intel_dp, crtc_state, dp_phy);
	if (v >= voltage_max)
		v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;

	/* vswing and pre-emphasis occupy disjoint bit fields. */
	return v | p;
}
444 
445 static u8 intel_dp_get_lane_adjust_train(struct intel_dp *intel_dp,
446 					 const struct intel_crtc_state *crtc_state,
447 					 enum drm_dp_phy dp_phy,
448 					 const u8 link_status[DP_LINK_STATUS_SIZE],
449 					 int lane)
450 {
451 	if (intel_dp_is_uhbr(crtc_state))
452 		return intel_dp_get_lane_adjust_tx_ffe_preset(intel_dp, crtc_state,
453 							      dp_phy, link_status, lane);
454 	else
455 		return intel_dp_get_lane_adjust_vswing_preemph(intel_dp, crtc_state,
456 							       dp_phy, link_status, lane);
457 }
458 
/*
 * Format/argument helpers for logging the per-lane (always 4 lanes)
 * adjustment requests parsed from the DPCD link status: vswing and
 * pre-emphasis levels for 8b/10b, TX FFE presets for 128b/132b.
 */
#define TRAIN_REQ_FMT "%d/%d/%d/%d"
#define _TRAIN_REQ_VSWING_ARGS(link_status, lane) \
	(drm_dp_get_adjust_request_voltage((link_status), (lane)) >> DP_TRAIN_VOLTAGE_SWING_SHIFT)
#define TRAIN_REQ_VSWING_ARGS(link_status) \
	_TRAIN_REQ_VSWING_ARGS(link_status, 0), \
	_TRAIN_REQ_VSWING_ARGS(link_status, 1), \
	_TRAIN_REQ_VSWING_ARGS(link_status, 2), \
	_TRAIN_REQ_VSWING_ARGS(link_status, 3)
#define _TRAIN_REQ_PREEMPH_ARGS(link_status, lane) \
	(drm_dp_get_adjust_request_pre_emphasis((link_status), (lane)) >> DP_TRAIN_PRE_EMPHASIS_SHIFT)
#define TRAIN_REQ_PREEMPH_ARGS(link_status) \
	_TRAIN_REQ_PREEMPH_ARGS(link_status, 0), \
	_TRAIN_REQ_PREEMPH_ARGS(link_status, 1), \
	_TRAIN_REQ_PREEMPH_ARGS(link_status, 2), \
	_TRAIN_REQ_PREEMPH_ARGS(link_status, 3)
#define _TRAIN_REQ_TX_FFE_ARGS(link_status, lane) \
	drm_dp_get_adjust_tx_ffe_preset((link_status), (lane))
#define TRAIN_REQ_TX_FFE_ARGS(link_status) \
	_TRAIN_REQ_TX_FFE_ARGS(link_status, 0), \
	_TRAIN_REQ_TX_FFE_ARGS(link_status, 1), \
	_TRAIN_REQ_TX_FFE_ARGS(link_status, 2), \
	_TRAIN_REQ_TX_FFE_ARGS(link_status, 3)
481 
/*
 * Parse the sink's adjustment requests in @link_status and update the drive
 * settings cached in intel_dp->train_set accordingly. All 4 train_set
 * entries are recomputed regardless of the active lane count.
 *
 * Returns true if any lane's drive setting changed.
 */
bool
intel_dp_get_adjust_train(struct intel_dp *intel_dp,
			  const struct intel_crtc_state *crtc_state,
			  enum drm_dp_phy dp_phy,
			  const u8 link_status[DP_LINK_STATUS_SIZE])
{
	bool changed = false;
	int lane;

	if (intel_dp_is_uhbr(crtc_state)) {
		lt_dbg(intel_dp, dp_phy,
		       "128b/132b, lanes: %d, "
		       "TX FFE request: " TRAIN_REQ_FMT "\n",
		       crtc_state->lane_count,
		       TRAIN_REQ_TX_FFE_ARGS(link_status));
	} else {
		lt_dbg(intel_dp, dp_phy,
		       "8b/10b, lanes: %d, "
		       "vswing request: " TRAIN_REQ_FMT ", "
		       "pre-emphasis request: " TRAIN_REQ_FMT "\n",
		       crtc_state->lane_count,
		       TRAIN_REQ_VSWING_ARGS(link_status),
		       TRAIN_REQ_PREEMPH_ARGS(link_status));
	}

	for (lane = 0; lane < 4; lane++) {
		u8 new = intel_dp_get_lane_adjust_train(intel_dp, crtc_state,
							dp_phy, link_status, lane);
		if (intel_dp->train_set[lane] == new)
			continue;

		intel_dp->train_set[lane] = new;
		changed = true;
	}

	return changed;
}
519 
520 static int intel_dp_training_pattern_set_reg(struct intel_dp *intel_dp,
521 					     enum drm_dp_phy dp_phy)
522 {
523 	return dp_phy == DP_PHY_DPRX ?
524 		DP_TRAINING_PATTERN_SET :
525 		DP_TRAINING_PATTERN_SET_PHY_REPEATER(dp_phy);
526 }
527 
/*
 * Program the training pattern on the source side, then write the pattern
 * together with the per-lane drive settings to the sink/LTTPR in a single
 * AUX transfer. Returns true if the full DPCD write succeeded.
 */
static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
			const struct intel_crtc_state *crtc_state,
			enum drm_dp_phy dp_phy,
			u8 dp_train_pat)
{
	int reg = intel_dp_training_pattern_set_reg(intel_dp, dp_phy);
	u8 buf[sizeof(intel_dp->train_set) + 1];
	int len;

	intel_dp_program_link_training_pattern(intel_dp, crtc_state,
					       dp_phy, dp_train_pat);

	buf[0] = dp_train_pat;
	/* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
	memcpy(buf + 1, intel_dp->train_set, crtc_state->lane_count);
	len = crtc_state->lane_count + 1;

	return drm_dp_dpcd_write(&intel_dp->aux, reg, buf, len) == len;
}
548 
549 static char dp_training_pattern_name(u8 train_pat)
550 {
551 	switch (train_pat) {
552 	case DP_TRAINING_PATTERN_1:
553 	case DP_TRAINING_PATTERN_2:
554 	case DP_TRAINING_PATTERN_3:
555 		return '0' + train_pat;
556 	case DP_TRAINING_PATTERN_4:
557 		return '4';
558 	default:
559 		MISSING_CASE(train_pat);
560 		return '?';
561 	}
562 }
563 
/*
 * Program the given training pattern (or disable training) on the source
 * side via the platform-specific set_link_train() hook. Only the DPCD value
 * in @dp_train_pat is consumed here for logging.
 */
void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
				       const struct intel_crtc_state *crtc_state,
				       enum drm_dp_phy dp_phy,
				       u8 dp_train_pat)
{
	u8 train_pat = intel_dp_training_pattern_symbol(dp_train_pat);

	if (train_pat != DP_TRAINING_PATTERN_DISABLE)
		lt_dbg(intel_dp, dp_phy, "Using DP training pattern TPS%c\n",
		       dp_training_pattern_name(train_pat));

	intel_dp->set_link_train(intel_dp, crtc_state, dp_train_pat);
}
578 
/*
 * Format/argument helpers for logging the programmed per-lane (always 4
 * lanes) drive settings from a train_set array, with a "(max)" tag when the
 * max-reached flag is set (8b/10b only; TX FFE presets have no such flag).
 */
#define TRAIN_SET_FMT "%d%s/%d%s/%d%s/%d%s"
#define _TRAIN_SET_VSWING_ARGS(train_set) \
	((train_set) & DP_TRAIN_VOLTAGE_SWING_MASK) >> DP_TRAIN_VOLTAGE_SWING_SHIFT, \
	(train_set) & DP_TRAIN_MAX_SWING_REACHED ? "(max)" : ""
#define TRAIN_SET_VSWING_ARGS(train_set) \
	_TRAIN_SET_VSWING_ARGS((train_set)[0]), \
	_TRAIN_SET_VSWING_ARGS((train_set)[1]), \
	_TRAIN_SET_VSWING_ARGS((train_set)[2]), \
	_TRAIN_SET_VSWING_ARGS((train_set)[3])
#define _TRAIN_SET_PREEMPH_ARGS(train_set) \
	((train_set) & DP_TRAIN_PRE_EMPHASIS_MASK) >> DP_TRAIN_PRE_EMPHASIS_SHIFT, \
	(train_set) & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ? "(max)" : ""
#define TRAIN_SET_PREEMPH_ARGS(train_set) \
	_TRAIN_SET_PREEMPH_ARGS((train_set)[0]), \
	_TRAIN_SET_PREEMPH_ARGS((train_set)[1]), \
	_TRAIN_SET_PREEMPH_ARGS((train_set)[2]), \
	_TRAIN_SET_PREEMPH_ARGS((train_set)[3])
#define _TRAIN_SET_TX_FFE_ARGS(train_set) \
	((train_set) & DP_TX_FFE_PRESET_VALUE_MASK), ""
#define TRAIN_SET_TX_FFE_ARGS(train_set) \
	_TRAIN_SET_TX_FFE_ARGS((train_set)[0]), \
	_TRAIN_SET_TX_FFE_ARGS((train_set)[1]), \
	_TRAIN_SET_TX_FFE_ARGS((train_set)[2]), \
	_TRAIN_SET_TX_FFE_ARGS((train_set)[3])
603 
/*
 * Apply the drive settings cached in intel_dp->train_set on the source side.
 * The source's signal levels are only programmed when the source itself is
 * the transmitter for @dp_phy; for a downstream LTTPR-driven PHY only the
 * log message is emitted here (the LTTPR gets the settings via DPCD).
 */
void intel_dp_set_signal_levels(struct intel_dp *intel_dp,
				const struct intel_crtc_state *crtc_state,
				enum drm_dp_phy dp_phy)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;

	if (intel_dp_is_uhbr(crtc_state)) {
		lt_dbg(intel_dp, dp_phy,
		       "128b/132b, lanes: %d, "
		       "TX FFE presets: " TRAIN_SET_FMT "\n",
		       crtc_state->lane_count,
		       TRAIN_SET_TX_FFE_ARGS(intel_dp->train_set));
	} else {
		lt_dbg(intel_dp, dp_phy,
		       "8b/10b, lanes: %d, "
		       "vswing levels: " TRAIN_SET_FMT ", "
		       "pre-emphasis levels: " TRAIN_SET_FMT "\n",
		       crtc_state->lane_count,
		       TRAIN_SET_VSWING_ARGS(intel_dp->train_set),
		       TRAIN_SET_PREEMPH_ARGS(intel_dp->train_set));
	}

	if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy))
		encoder->set_signal_levels(encoder, crtc_state);
}
629 
/*
 * Zero out the cached drive settings, program the resulting (minimum)
 * signal levels and set the given training pattern. Returns true if the
 * DPCD write of pattern + drive settings succeeded.
 */
static bool
intel_dp_reset_link_train(struct intel_dp *intel_dp,
			  const struct intel_crtc_state *crtc_state,
			  enum drm_dp_phy dp_phy,
			  u8 dp_train_pat)
{
	memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
	intel_dp_set_signal_levels(intel_dp, crtc_state, dp_phy);
	return intel_dp_set_link_train(intel_dp, crtc_state, dp_phy, dp_train_pat);
}
640 
641 static bool
642 intel_dp_update_link_train(struct intel_dp *intel_dp,
643 			   const struct intel_crtc_state *crtc_state,
644 			   enum drm_dp_phy dp_phy)
645 {
646 	int reg = dp_phy == DP_PHY_DPRX ?
647 			    DP_TRAINING_LANE0_SET :
648 			    DP_TRAINING_LANE0_SET_PHY_REPEATER(dp_phy);
649 	int ret;
650 
651 	intel_dp_set_signal_levels(intel_dp, crtc_state, dp_phy);
652 
653 	ret = drm_dp_dpcd_write(&intel_dp->aux, reg,
654 				intel_dp->train_set, crtc_state->lane_count);
655 
656 	return ret == crtc_state->lane_count;
657 }
658 
659 /* 128b/132b */
660 static bool intel_dp_lane_max_tx_ffe_reached(u8 train_set_lane)
661 {
662 	return (train_set_lane & DP_TX_FFE_PRESET_VALUE_MASK) ==
663 		DP_TX_FFE_PRESET_VALUE_MASK;
664 }
665 
666 /*
667  * 8b/10b
668  *
669  * FIXME: The DP spec is very confusing here, also the Link CTS spec seems to
670  * have self contradicting tests around this area.
671  *
672  * In lieu of better ideas let's just stop when we've reached the max supported
673  * vswing with its max pre-emphasis, which is either 2+1 or 3+0 depending on
674  * whether vswing level 3 is supported or not.
675  */
676 static bool intel_dp_lane_max_vswing_reached(u8 train_set_lane)
677 {
678 	u8 v = (train_set_lane & DP_TRAIN_VOLTAGE_SWING_MASK) >>
679 		DP_TRAIN_VOLTAGE_SWING_SHIFT;
680 	u8 p = (train_set_lane & DP_TRAIN_PRE_EMPHASIS_MASK) >>
681 		DP_TRAIN_PRE_EMPHASIS_SHIFT;
682 
683 	if ((train_set_lane & DP_TRAIN_MAX_SWING_REACHED) == 0)
684 		return false;
685 
686 	if (v + p != 3)
687 		return false;
688 
689 	return true;
690 }
691 
692 static bool intel_dp_link_max_vswing_reached(struct intel_dp *intel_dp,
693 					     const struct intel_crtc_state *crtc_state)
694 {
695 	int lane;
696 
697 	for (lane = 0; lane < crtc_state->lane_count; lane++) {
698 		u8 train_set_lane = intel_dp->train_set[lane];
699 
700 		if (intel_dp_is_uhbr(crtc_state)) {
701 			if (!intel_dp_lane_max_tx_ffe_reached(train_set_lane))
702 				return false;
703 		} else {
704 			if (!intel_dp_lane_max_vswing_reached(train_set_lane))
705 				return false;
706 		}
707 	}
708 
709 	return true;
710 }
711 
/*
 * Write DP_DOWNSPREAD_CTRL (MSA timing parameter ignore for VRR) and the
 * adjacent DP_MAIN_LINK_CHANNEL_CODING_SET (8b/10b vs. 128b/132b based on
 * @link_rate) in one 2-byte AUX transfer.
 */
void intel_dp_link_training_set_mode(struct intel_dp *intel_dp, int link_rate, bool is_vrr)
{
	u8 link_config[2];

	link_config[0] = is_vrr ? DP_MSA_TIMING_PAR_IGNORE_EN : 0;
	link_config[1] = drm_dp_is_uhbr_rate(link_rate) ?
			 DP_SET_ANSI_128B132B : DP_SET_ANSI_8B10B;
	drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
}
721 
/* Program the MSA-ignore and channel coding DPCD config for @crtc_state. */
static void intel_dp_update_downspread_ctrl(struct intel_dp *intel_dp,
					    const struct intel_crtc_state *crtc_state)
{
	 /*
	  * Currently, we set the MSA ignore bit based on vrr.in_range.
	  * We can't really read that out during driver load since we don't have
	  * the connector information read in yet. So if we do end up doing a
	  * modeset during initial_commit() we'll clear the MSA ignore bit.
	  * GOP likely wouldn't have set this bit so after the initial commit,
	  * if there are no modesets and we enable VRR mode seamlessly
	  * (without a full modeset), the MSA ignore bit might never get set.
	  *
	  * #TODO: Implement readout of vrr.in_range.
	  * We need fastset support for setting the MSA ignore bit in DPCD,
	  * especially on the first real commit when clearing the inherited flag.
	  */
	intel_dp_link_training_set_mode(intel_dp,
					crtc_state->port_clock, crtc_state->vrr.in_range);
}
741 
/*
 * Program the link rate and lane count for link training: via
 * DP_LINK_BW_SET when @link_bw is non-zero, otherwise via the eDP v1.4+
 * DP_LINK_RATE_SET index in @rate_select.
 */
void intel_dp_link_training_set_bw(struct intel_dp *intel_dp,
				   int link_bw, int rate_select, int lane_count,
				   bool enhanced_framing)
{
	/* The enhanced framing enable bit shares the LANE_COUNT_SET register. */
	if (enhanced_framing)
		lane_count |= DP_LANE_COUNT_ENHANCED_FRAME_EN;

	if (link_bw) {
		/* DP and eDP v1.3 and earlier link bw set method. */
		u8 link_config[] = { link_bw, lane_count };

		drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config,
				  ARRAY_SIZE(link_config));
	} else {
		/*
		 * eDP v1.4 and later link rate set method.
		 *
		 * eDP v1.4x sinks shall ignore DP_LINK_RATE_SET if
		 * DP_LINK_BW_SET is set. Avoid writing DP_LINK_BW_SET.
		 *
		 * eDP v1.5 sinks allow choosing either, and the last choice
		 * shall be active.
		 */
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_LANE_COUNT_SET, lane_count);
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_LINK_RATE_SET, rate_select);
	}
}
769 
/*
 * Pick Training Pattern Sequence (TPS) for channel equalization. 128b/132b TPS2
 * for UHBR+, TPS4 for HBR3 or for 1.4 devices that support it, TPS3 for HBR2 or
 * 1.2 devices that support it, TPS2 otherwise.
 */
static u32 intel_dp_training_pattern(struct intel_dp *intel_dp,
				     const struct intel_crtc_state *crtc_state,
				     enum drm_dp_phy dp_phy)
{
	struct intel_display *display = to_intel_display(intel_dp);
	bool source_tps3, sink_tps3, source_tps4, sink_tps4;

	/* UHBR+ use separate 128b/132b TPS2 */
	if (intel_dp_is_uhbr(crtc_state))
		return DP_TRAINING_PATTERN_2;

	/*
	 * TPS4 support is mandatory for all downstream devices that
	 * support HBR3. There are no known eDP panels that support
	 * TPS4 as of Feb 2018 as per VESA eDP_v1.4b_E1 specification.
	 * LTTPRs must support TPS4.
	 */
	source_tps4 = intel_dp_source_supports_tps4(display);
	/* For LTTPR PHYs TPS4 support is implied; only check the DPRX. */
	sink_tps4 = dp_phy != DP_PHY_DPRX ||
		    drm_dp_tps4_supported(intel_dp->dpcd);
	if (source_tps4 && sink_tps4) {
		return DP_TRAINING_PATTERN_4;
	} else if (crtc_state->port_clock == 810000) {
		if (!source_tps4)
			lt_dbg(intel_dp, dp_phy,
			       "8.1 Gbps link rate without source TPS4 support\n");
		if (!sink_tps4)
			lt_dbg(intel_dp, dp_phy,
			       "8.1 Gbps link rate without sink TPS4 support\n");
	}

	/*
	 * TPS3 support is mandatory for downstream devices that
	 * support HBR2. However, not all sinks follow the spec.
	 */
	source_tps3 = intel_dp_source_supports_tps3(display);
	/* As above: LTTPR PHYs imply TPS3 support. */
	sink_tps3 = dp_phy != DP_PHY_DPRX ||
		    drm_dp_tps3_supported(intel_dp->dpcd);
	if (source_tps3 && sink_tps3) {
		return  DP_TRAINING_PATTERN_3;
	} else if (crtc_state->port_clock >= 540000) {
		if (!source_tps3)
			lt_dbg(intel_dp, dp_phy,
			       ">=5.4/6.48 Gbps link rate without source TPS3 support\n");
		if (!sink_tps3)
			lt_dbg(intel_dp, dp_phy,
			       ">=5.4/6.48 Gbps link rate without sink TPS3 support\n");
	}

	return DP_TRAINING_PATTERN_2;
}
826 
/* Program the link bw/rate and lane count taken from @crtc_state. */
static void intel_dp_update_link_bw_set(struct intel_dp *intel_dp,
					const struct intel_crtc_state *crtc_state,
					u8 link_bw, u8 rate_select)
{
	intel_dp_link_training_set_bw(intel_dp, link_bw, rate_select, crtc_state->lane_count,
				      crtc_state->enhanced_framing);
}
834 
/*
 * Prepare link training by configuring the link parameters. On DDI platforms
 * also enable the port here. Always returns true.
 */
static bool
intel_dp_prepare_link_train(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *crtc_state)
{
	u8 link_bw, rate_select;

	/* Optional platform hook; also enables the port on DDI platforms. */
	if (intel_dp->prepare_link_retrain)
		intel_dp->prepare_link_retrain(intel_dp, crtc_state);

	/* Exactly one of link_bw (DP) and rate_select (eDP 1.4+) is used. */
	intel_dp_compute_rate(intel_dp, crtc_state->port_clock,
			      &link_bw, &rate_select);

	/*
	 * WaEdpLinkRateDataReload
	 *
	 * Parade PS8461E MUX (used on various TGL+ laptops) needs
	 * to snoop the link rates reported by the sink when we
	 * use LINK_RATE_SET in order to operate in jitter cleaning
	 * mode (as opposed to redriver mode). Unfortunately it
	 * loses track of the snooped link rates when powered down,
	 * so we need to make it re-snoop often. Without this high
	 * link rates are not stable.
	 */
	if (!link_bw) {
		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];

		lt_dbg(intel_dp, DP_PHY_DPRX, "Reloading eDP link rates\n");

		/* The read result is discarded; only the AUX traffic matters. */
		drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
				 sink_rates, sizeof(sink_rates));
	}

	if (link_bw)
		lt_dbg(intel_dp, DP_PHY_DPRX, "Using LINK_BW_SET value %02x\n",
		       link_bw);
	else
		lt_dbg(intel_dp, DP_PHY_DPRX,
		       "Using LINK_RATE_SET value %02x\n",
		       rate_select);
	/*
	 * Spec DP2.1 Section 3.5.2.16
	 * Prior to LT DPTX should set 128b/132b DP Channel coding and then set link rate
	 */
	intel_dp_update_downspread_ctrl(intel_dp, crtc_state);
	intel_dp_update_link_bw_set(intel_dp, crtc_state, link_bw,
				    rate_select);

	return true;
}
888 
889 static bool intel_dp_adjust_request_changed(const struct intel_crtc_state *crtc_state,
890 					    const u8 old_link_status[DP_LINK_STATUS_SIZE],
891 					    const u8 new_link_status[DP_LINK_STATUS_SIZE])
892 {
893 	int lane;
894 
895 	for (lane = 0; lane < crtc_state->lane_count; lane++) {
896 		u8 old, new;
897 
898 		if (intel_dp_is_uhbr(crtc_state)) {
899 			old = drm_dp_get_adjust_tx_ffe_preset(old_link_status, lane);
900 			new = drm_dp_get_adjust_tx_ffe_preset(new_link_status, lane);
901 		} else {
902 			old = drm_dp_get_adjust_request_voltage(old_link_status, lane) |
903 				drm_dp_get_adjust_request_pre_emphasis(old_link_status, lane);
904 			new = drm_dp_get_adjust_request_voltage(new_link_status, lane) |
905 				drm_dp_get_adjust_request_pre_emphasis(new_link_status, lane);
906 		}
907 
908 		if (old != new)
909 			return true;
910 	}
911 
912 	return false;
913 }
914 
/**
 * intel_dp_dump_link_status - log the raw DPCD link status bytes
 * @intel_dp: DP struct
 * @dp_phy: the DP PHY (DPRX or an LTTPR) the status was read from
 * @link_status: the 6 DPCD link status register values
 *
 * Emit the lane status, alignment/sink status and adjustment request
 * bytes as a debug message, for link training diagnostics.
 */
void
intel_dp_dump_link_status(struct intel_dp *intel_dp, enum drm_dp_phy dp_phy,
			  const u8 link_status[DP_LINK_STATUS_SIZE])
{
	lt_dbg(intel_dp, dp_phy,
	       "ln0_1:0x%x ln2_3:0x%x align:0x%x sink:0x%x adj_req0_1:0x%x adj_req2_3:0x%x\n",
	       link_status[0], link_status[1], link_status[2],
	       link_status[3], link_status[4], link_status[5]);
}
924 
/*
 * Perform the link training clock recovery phase on the given DP PHY using
 * training pattern 1.
 *
 * Returns true once all lanes have achieved clock recovery, false if the
 * phase failed (AUX error, retry/voltage limits hit, or max swing reached).
 */
static bool
intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp,
				      const struct intel_crtc_state *crtc_state,
				      enum drm_dp_phy dp_phy)
{
	u8 old_link_status[DP_LINK_STATUS_SIZE] = {};
	int voltage_tries, cr_tries, max_cr_tries;
	u8 link_status[DP_LINK_STATUS_SIZE];
	bool max_vswing_reached = false;
	int delay_us;

	/* Delay to wait after setting signal levels, before reading the link status. */
	delay_us = drm_dp_read_clock_recovery_delay(&intel_dp->aux,
						    intel_dp->dpcd, dp_phy,
						    intel_dp_is_uhbr(crtc_state));

	/* clock recovery */
	if (!intel_dp_reset_link_train(intel_dp, crtc_state, dp_phy,
				       DP_TRAINING_PATTERN_1 |
				       DP_LINK_SCRAMBLING_DISABLE)) {
		lt_err(intel_dp, dp_phy, "Failed to enable link training\n");
		return false;
	}

	/*
	 * The DP 1.4 spec defines the max clock recovery retries value
	 * as 10 but for pre-DP 1.4 devices we set a very tolerant
	 * retry limit of 80 (4 voltage levels x 4 preemphasis levels x
	 * 5 identical voltage retries). Since the previous specs didn't
	 * define a limit and created the possibility of an infinite loop
	 * we want to prevent any sink from triggering that corner case.
	 */
	if (intel_dp->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14)
		max_cr_tries = 10;
	else
		max_cr_tries = 80;

	voltage_tries = 1;
	for (cr_tries = 0; cr_tries < max_cr_tries; ++cr_tries) {
		fsleep(delay_us);

		if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy,
						     link_status) < 0) {
			lt_err(intel_dp, dp_phy, "Failed to get link status\n");
			return false;
		}

		if (drm_dp_clock_recovery_ok(link_status, crtc_state->lane_count)) {
			lt_dbg(intel_dp, dp_phy, "Clock recovery OK\n");
			return true;
		}

		/* Give up after 5 tries with an unchanged adjustment request. */
		if (voltage_tries == 5) {
			intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
			lt_dbg(intel_dp, dp_phy, "Same voltage tried 5 times\n");
			return false;
		}

		/* Previous iteration already transmitted the maximum swing. */
		if (max_vswing_reached) {
			intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
			lt_dbg(intel_dp, dp_phy, "Max Voltage Swing reached\n");
			return false;
		}

		/* Update training set as requested by target */
		intel_dp_get_adjust_train(intel_dp, crtc_state, dp_phy,
					  link_status);
		if (!intel_dp_update_link_train(intel_dp, crtc_state, dp_phy)) {
			lt_err(intel_dp, dp_phy, "Failed to update link training\n");
			return false;
		}

		/* Count consecutive iterations with an unchanged adjustment request. */
		if (!intel_dp_adjust_request_changed(crtc_state, old_link_status, link_status))
			++voltage_tries;
		else
			voltage_tries = 1;

		memcpy(old_link_status, link_status, sizeof(link_status));

		if (intel_dp_link_max_vswing_reached(intel_dp, crtc_state))
			max_vswing_reached = true;
	}

	intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
	lt_err(intel_dp, dp_phy, "Failed clock recovery %d times, giving up!\n",
	       max_cr_tries);

	return false;
}
1017 
/*
 * Perform the link training channel equalization phase on the given DP PHY
 * using one of training pattern 2, 3 or 4 depending on the source and
 * sink capabilities.
 *
 * Returns true if all lanes achieve channel equalization within 5 tries
 * while clock recovery stays locked, false otherwise.
 */
static bool
intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp,
					    const struct intel_crtc_state *crtc_state,
					    enum drm_dp_phy dp_phy)
{
	int tries;
	u32 training_pattern;
	u8 link_status[DP_LINK_STATUS_SIZE];
	bool channel_eq = false;
	int delay_us;

	/* Delay to wait after setting signal levels, before reading the link status. */
	delay_us = drm_dp_read_channel_eq_delay(&intel_dp->aux,
						intel_dp->dpcd, dp_phy,
						intel_dp_is_uhbr(crtc_state));

	training_pattern = intel_dp_training_pattern(intel_dp, crtc_state, dp_phy);
	/* Scrambling is disabled for TPS2/3 and enabled for TPS4 */
	if (training_pattern != DP_TRAINING_PATTERN_4)
		training_pattern |= DP_LINK_SCRAMBLING_DISABLE;

	/* channel equalization */
	if (!intel_dp_set_link_train(intel_dp, crtc_state, dp_phy,
				     training_pattern)) {
		lt_err(intel_dp, dp_phy, "Failed to start channel equalization\n");
		return false;
	}

	for (tries = 0; tries < 5; tries++) {
		fsleep(delay_us);

		if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy,
						     link_status) < 0) {
			lt_err(intel_dp, dp_phy, "Failed to get link status\n");
			break;
		}

		/* Make sure clock is still ok */
		if (!drm_dp_clock_recovery_ok(link_status,
					      crtc_state->lane_count)) {
			intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
			lt_dbg(intel_dp, dp_phy,
			       "Clock recovery check failed, cannot continue channel equalization\n");
			break;
		}

		if (drm_dp_channel_eq_ok(link_status,
					 crtc_state->lane_count)) {
			channel_eq = true;
			lt_dbg(intel_dp, dp_phy, "Channel EQ done. DP Training successful\n");
			break;
		}

		/* Update training set as requested by target */
		intel_dp_get_adjust_train(intel_dp, crtc_state, dp_phy,
					  link_status);
		if (!intel_dp_update_link_train(intel_dp, crtc_state, dp_phy)) {
			lt_err(intel_dp, dp_phy, "Failed to update link training\n");
			break;
		}
	}

	/* Try 5 times, else fail and try at lower BW */
	if (tries == 5) {
		intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
		lt_dbg(intel_dp, dp_phy, "Channel equalization failed 5 times\n");
	}

	return channel_eq;
}
1092 
1093 static bool intel_dp_disable_dpcd_training_pattern(struct intel_dp *intel_dp,
1094 						   enum drm_dp_phy dp_phy)
1095 {
1096 	int reg = intel_dp_training_pattern_set_reg(intel_dp, dp_phy);
1097 	u8 val = DP_TRAINING_PATTERN_DISABLE;
1098 
1099 	return drm_dp_dpcd_write(&intel_dp->aux, reg, &val, 1) == 1;
1100 }
1101 
1102 static int
1103 intel_dp_128b132b_intra_hop(struct intel_dp *intel_dp,
1104 			    const struct intel_crtc_state *crtc_state)
1105 {
1106 	u8 sink_status;
1107 	int ret;
1108 
1109 	ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_STATUS, &sink_status);
1110 	if (ret != 1) {
1111 		lt_dbg(intel_dp, DP_PHY_DPRX, "Failed to read sink status\n");
1112 		return ret < 0 ? ret : -EIO;
1113 	}
1114 
1115 	return sink_status & DP_INTRA_HOP_AUX_REPLY_INDICATION ? 1 : 0;
1116 }
1117 
/**
 * intel_dp_stop_link_train - stop link training
 * @intel_dp: DP struct
 * @crtc_state: state for CRTC attached to the encoder
 *
 * Stop the link training of the @intel_dp port, disabling the training
 * pattern in the sink's DPCD, and disabling the test pattern symbol
 * generation on the port.
 *
 * What symbols are output on the port after this point is
 * platform specific: On DDI/VLV/CHV platforms it will be the idle pattern
 * with the pipe being disabled, on older platforms it's HW specific if/how an
 * idle pattern is generated, as the pipe is already enabled here for those.
 *
 * This function must be called after intel_dp_start_link_train().
 */
void intel_dp_stop_link_train(struct intel_dp *intel_dp,
			      const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	int ret;

	/* The link is considered active from this point on. */
	intel_dp->link.active = true;

	intel_dp_program_link_training_pattern(intel_dp, crtc_state, DP_PHY_DPRX,
					       DP_TRAINING_PATTERN_DISABLE);

	/*
	 * On 128b/132b links wait (up to 500 ms, polling every 500 us) for
	 * the sink's intra-hop AUX reply indication to clear.
	 */
	if (intel_dp_is_uhbr(crtc_state)) {
		ret = poll_timeout_us(ret = intel_dp_128b132b_intra_hop(intel_dp, crtc_state),
				      ret == 0,
				      500, 500 * 1000, false);
		if (ret)
			lt_dbg(intel_dp, DP_PHY_DPRX, "128b/132b intra-hop not clearing\n");
	}

	/* Pairs with intel_hpd_block() in intel_dp_start_link_train(). */
	intel_hpd_unblock(encoder);

	/*
	 * Queue a link check as long as further same-parameter retries are
	 * still allowed; delayed by 2 seconds unless a failure was already
	 * seen in this training sequence.
	 */
	if (!display->hotplug.ignore_long_hpd &&
	    intel_dp->link.seq_train_failures < MAX_SEQ_TRAIN_FAILURES) {
		int delay_ms = intel_dp->link.seq_train_failures ? 0 : 2000;

		intel_encoder_link_check_queue_work(encoder, delay_ms);
	}
}
1163 
1164 static bool
1165 intel_dp_link_train_phy(struct intel_dp *intel_dp,
1166 			const struct intel_crtc_state *crtc_state,
1167 			enum drm_dp_phy dp_phy)
1168 {
1169 	bool ret = false;
1170 
1171 	if (!intel_dp_link_training_clock_recovery(intel_dp, crtc_state, dp_phy))
1172 		goto out;
1173 
1174 	if (!intel_dp_link_training_channel_equalization(intel_dp, crtc_state, dp_phy))
1175 		goto out;
1176 
1177 	ret = true;
1178 
1179 out:
1180 	lt_dbg(intel_dp, dp_phy,
1181 	       "Link Training %s at link rate = %d, lane count = %d\n",
1182 	       ret ? "passed" : "failed",
1183 	       crtc_state->port_clock, crtc_state->lane_count);
1184 
1185 	return ret;
1186 }
1187 
1188 static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
1189 						     int link_rate,
1190 						     u8 lane_count)
1191 {
1192 	/* FIXME figure out what we actually want here */
1193 	const struct drm_display_mode *fixed_mode =
1194 		intel_panel_preferred_fixed_mode(intel_dp->attached_connector);
1195 	int mode_rate, max_rate;
1196 
1197 	mode_rate = intel_dp_link_required(fixed_mode->clock, 18);
1198 	max_rate = intel_dp_max_link_data_rate(intel_dp, link_rate, lane_count);
1199 	if (mode_rate > max_rate)
1200 		return false;
1201 
1202 	return true;
1203 }
1204 
1205 static bool reduce_link_params_in_bw_order(struct intel_dp *intel_dp,
1206 					   const struct intel_crtc_state *crtc_state,
1207 					   int *new_link_rate, int *new_lane_count)
1208 {
1209 	int link_rate;
1210 	int lane_count;
1211 	int i;
1212 
1213 	i = intel_dp_link_config_index(intel_dp, crtc_state->port_clock, crtc_state->lane_count);
1214 	for (i--; i >= 0; i--) {
1215 		intel_dp_link_config_get(intel_dp, i, &link_rate, &lane_count);
1216 
1217 		if ((intel_dp->link.force_rate &&
1218 		     intel_dp->link.force_rate != link_rate) ||
1219 		    (intel_dp->link.force_lane_count &&
1220 		     intel_dp->link.force_lane_count != lane_count))
1221 			continue;
1222 
1223 		break;
1224 	}
1225 
1226 	if (i < 0)
1227 		return false;
1228 
1229 	*new_link_rate = link_rate;
1230 	*new_lane_count = lane_count;
1231 
1232 	return true;
1233 }
1234 
1235 static int reduce_link_rate(struct intel_dp *intel_dp, int current_rate)
1236 {
1237 	int rate_index;
1238 	int new_rate;
1239 
1240 	if (intel_dp->link.force_rate)
1241 		return -1;
1242 
1243 	rate_index = intel_dp_rate_index(intel_dp->common_rates,
1244 					 intel_dp->num_common_rates,
1245 					 current_rate);
1246 
1247 	if (rate_index <= 0)
1248 		return -1;
1249 
1250 	new_rate = intel_dp_common_rate(intel_dp, rate_index - 1);
1251 
1252 	/* TODO: Make switching from UHBR to non-UHBR rates work. */
1253 	if (drm_dp_is_uhbr_rate(current_rate) != drm_dp_is_uhbr_rate(new_rate))
1254 		return -1;
1255 
1256 	return new_rate;
1257 }
1258 
1259 static int reduce_lane_count(struct intel_dp *intel_dp, int current_lane_count)
1260 {
1261 	if (intel_dp->link.force_lane_count)
1262 		return -1;
1263 
1264 	if (current_lane_count == 1)
1265 		return -1;
1266 
1267 	return current_lane_count >> 1;
1268 }
1269 
1270 static bool reduce_link_params_in_rate_lane_order(struct intel_dp *intel_dp,
1271 						  const struct intel_crtc_state *crtc_state,
1272 						  int *new_link_rate, int *new_lane_count)
1273 {
1274 	int link_rate;
1275 	int lane_count;
1276 
1277 	lane_count = crtc_state->lane_count;
1278 	link_rate = reduce_link_rate(intel_dp, crtc_state->port_clock);
1279 	if (link_rate < 0) {
1280 		lane_count = reduce_lane_count(intel_dp, crtc_state->lane_count);
1281 		link_rate = intel_dp_max_common_rate(intel_dp);
1282 	}
1283 
1284 	if (lane_count < 0)
1285 		return false;
1286 
1287 	*new_link_rate = link_rate;
1288 	*new_lane_count = lane_count;
1289 
1290 	return true;
1291 }
1292 
1293 static bool reduce_link_params(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state,
1294 			       int *new_link_rate, int *new_lane_count)
1295 {
1296 	/* TODO: Use the same fallback logic on SST as on MST. */
1297 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST))
1298 		return reduce_link_params_in_bw_order(intel_dp, crtc_state,
1299 						      new_link_rate, new_lane_count);
1300 	else
1301 		return reduce_link_params_in_rate_lane_order(intel_dp, crtc_state,
1302 							     new_link_rate, new_lane_count);
1303 }
1304 
/*
 * Determine the link parameters to use for the next training attempt after
 * a failure, storing them in intel_dp->link.max_rate/max_lane_count.
 *
 * Returns 0 if a retraining should be attempted (possibly with unchanged
 * parameters), -1 if no further fallback is possible.
 */
static int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
						   const struct intel_crtc_state *crtc_state)
{
	int new_link_rate;
	int new_lane_count;

	/* On eDP first retry once with the maximum parameters. */
	if (intel_dp_is_edp(intel_dp) && !intel_dp->use_max_params) {
		lt_dbg(intel_dp, DP_PHY_DPRX,
		       "Retrying Link training for eDP with max parameters\n");
		intel_dp->use_max_params = true;
		return 0;
	}

	if (!reduce_link_params(intel_dp, crtc_state, &new_link_rate, &new_lane_count))
		return -1;

	/*
	 * Don't reduce the parameters below what the eDP panel's fixed mode
	 * requires; retry with the unreduced parameters instead.
	 */
	if (intel_dp_is_edp(intel_dp) &&
	    !intel_dp_can_link_train_fallback_for_edp(intel_dp, new_link_rate, new_lane_count)) {
		lt_dbg(intel_dp, DP_PHY_DPRX,
		       "Retrying Link training for eDP with same parameters\n");
		return 0;
	}

	lt_dbg(intel_dp, DP_PHY_DPRX,
	       "Reducing link parameters from %dx%d to %dx%d\n",
	       crtc_state->lane_count, crtc_state->port_clock,
	       new_lane_count, new_link_rate);

	intel_dp->link.max_rate = new_link_rate;
	intel_dp->link.max_lane_count = new_lane_count;

	return 0;
}
1338 
1339 static bool intel_dp_schedule_fallback_link_training(struct intel_atomic_state *state,
1340 						     struct intel_dp *intel_dp,
1341 						     const struct intel_crtc_state *crtc_state)
1342 {
1343 	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
1344 
1345 	if (!intel_digital_port_connected(&dp_to_dig_port(intel_dp)->base)) {
1346 		lt_dbg(intel_dp, DP_PHY_DPRX, "Link Training failed on disconnected sink.\n");
1347 		return true;
1348 	}
1349 
1350 	if (intel_dp->hobl_active) {
1351 		lt_dbg(intel_dp, DP_PHY_DPRX,
1352 		       "Link Training failed with HOBL active, not enabling it from now on\n");
1353 		intel_dp->hobl_failed = true;
1354 	} else if (intel_dp_get_link_train_fallback_values(intel_dp, crtc_state)) {
1355 		return false;
1356 	}
1357 
1358 	/* Schedule a Hotplug Uevent to userspace to start modeset */
1359 	intel_dp_queue_modeset_retry_for_link(state, encoder, crtc_state);
1360 
1361 	return true;
1362 }
1363 
/* Perform the link training on all LTTPRs and the DPRX on a link. */
static bool
intel_dp_link_train_all_phys(struct intel_dp *intel_dp,
			     const struct intel_crtc_state *crtc_state,
			     int lttpr_count)
{
	bool ret = true;
	int i;

	/* Train each LTTPR PHY in turn, stopping at the first failure. */
	for (i = lttpr_count - 1; i >= 0; i--) {
		enum drm_dp_phy dp_phy = DP_PHY_LTTPR(i);

		ret = intel_dp_link_train_phy(intel_dp, crtc_state, dp_phy);
		/* Clear the pattern even if training this PHY failed. */
		intel_dp_disable_dpcd_training_pattern(intel_dp, dp_phy);

		if (!ret)
			break;
	}

	/* Train the DPRX itself only if all LTTPRs trained successfully. */
	if (ret)
		ret = intel_dp_link_train_phy(intel_dp, crtc_state, DP_PHY_DPRX);

	/* Always clear the DPRX training pattern and switch to idle output. */
	intel_dp_disable_dpcd_training_pattern(intel_dp, DP_PHY_DPRX);
	intel_dp->set_idle_link_train(intel_dp, crtc_state);

	return ret;
}
1391 
/*
 * 128b/132b DP LANEx_EQ_DONE Sequence (DP 2.0 E11 3.5.2.16.1)
 *
 * Returns true once per-lane channel equalization and interlane alignment
 * are both done, false on any AUX failure, downstream training failure or
 * timeout.
 */
static bool
intel_dp_128b132b_lane_eq(struct intel_dp *intel_dp,
			  const struct intel_crtc_state *crtc_state)
{
	u8 link_status[DP_LINK_STATUS_SIZE];
	int delay_us;
	int try, max_tries = 20;
	unsigned long deadline;
	bool timeout = false;

	/*
	 * Reset signal levels. Start transmitting 128b/132b TPS1.
	 *
	 * Put DPRX and LTTPRs (if any) into intra-hop AUX mode by writing TPS1
	 * in DP_TRAINING_PATTERN_SET.
	 */
	if (!intel_dp_reset_link_train(intel_dp, crtc_state, DP_PHY_DPRX,
				       DP_TRAINING_PATTERN_1)) {
		lt_err(intel_dp, DP_PHY_DPRX, "Failed to start 128b/132b TPS1\n");
		return false;
	}

	delay_us = drm_dp_128b132b_read_aux_rd_interval(&intel_dp->aux);

	/* Read the initial TX FFE settings. */
	if (drm_dp_dpcd_read_link_status(&intel_dp->aux, link_status) < 0) {
		lt_err(intel_dp, DP_PHY_DPRX, "Failed to read TX FFE presets\n");
		return false;
	}

	/* Update signal levels and training set as requested. */
	intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX, link_status);
	if (!intel_dp_update_link_train(intel_dp, crtc_state, DP_PHY_DPRX)) {
		lt_err(intel_dp, DP_PHY_DPRX, "Failed to set initial TX FFE settings\n");
		return false;
	}

	/* Start transmitting 128b/132b TPS2. */
	if (!intel_dp_set_link_train(intel_dp, crtc_state, DP_PHY_DPRX,
				     DP_TRAINING_PATTERN_2)) {
		lt_err(intel_dp, DP_PHY_DPRX, "Failed to start 128b/132b TPS2\n");
		return false;
	}

	/* Time budget for the LANEx_EQ_DONE Sequence */
	deadline = jiffies + msecs_to_jiffies_timeout(450);

	/*
	 * Poll for per-lane channel equalization, bounded both by a try
	 * count and the 450 ms time budget above.
	 */
	for (try = 0; try < max_tries; try++) {
		fsleep(delay_us);

		if (drm_dp_dpcd_read_link_status(&intel_dp->aux, link_status) < 0) {
			lt_err(intel_dp, DP_PHY_DPRX, "Failed to read link status\n");
			return false;
		}

		if (drm_dp_128b132b_link_training_failed(link_status)) {
			intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
			lt_err(intel_dp, DP_PHY_DPRX,
			       "Downstream link training failure\n");
			return false;
		}

		if (drm_dp_128b132b_lane_channel_eq_done(link_status, crtc_state->lane_count)) {
			lt_dbg(intel_dp, DP_PHY_DPRX, "Lane channel eq done\n");
			break;
		}

		if (timeout) {
			intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
			lt_err(intel_dp, DP_PHY_DPRX, "Lane channel eq timeout\n");
			return false;
		}

		if (time_after(jiffies, deadline))
			timeout = true; /* try one last time after deadline */

		/*
		 * During LT, Tx shall read AUX_RD_INTERVAL just before writing the new FFE
		 * presets.
		 */
		delay_us = drm_dp_128b132b_read_aux_rd_interval(&intel_dp->aux);

		intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX, link_status);

		/* Update signal levels and training set as requested. */
		if (!intel_dp_update_link_train(intel_dp, crtc_state, DP_PHY_DPRX)) {
			lt_err(intel_dp, DP_PHY_DPRX, "Failed to update TX FFE settings\n");
			return false;
		}
	}

	if (try == max_tries) {
		intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
		lt_err(intel_dp, DP_PHY_DPRX, "Max loop count reached\n");
		return false;
	}

	/*
	 * Wait for interlane alignment, polling every 2-3 ms within the same
	 * 450 ms time budget.
	 */
	for (;;) {
		if (time_after(jiffies, deadline))
			timeout = true; /* try one last time after deadline */

		if (drm_dp_dpcd_read_link_status(&intel_dp->aux, link_status) < 0) {
			lt_err(intel_dp, DP_PHY_DPRX, "Failed to read link status\n");
			return false;
		}

		if (drm_dp_128b132b_link_training_failed(link_status)) {
			intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
			lt_err(intel_dp, DP_PHY_DPRX, "Downstream link training failure\n");
			return false;
		}

		if (drm_dp_128b132b_eq_interlane_align_done(link_status)) {
			lt_dbg(intel_dp, DP_PHY_DPRX, "Interlane align done\n");
			break;
		}

		if (timeout) {
			intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
			lt_err(intel_dp, DP_PHY_DPRX, "Interlane align timeout\n");
			return false;
		}

		usleep_range(2000, 3000);
	}

	return true;
}
1523 
/*
 * 128b/132b DP LANEx_CDS_DONE Sequence (DP 2.0 E11 3.5.2.16.2)
 *
 * Returns true once EQ/CDS interlane alignment is done and all lanes are
 * symbol locked, false on AUX failure, downstream training failure or
 * timeout.
 */
static bool
intel_dp_128b132b_lane_cds(struct intel_dp *intel_dp,
			   const struct intel_crtc_state *crtc_state,
			   int lttpr_count)
{
	u8 link_status[DP_LINK_STATUS_SIZE];
	unsigned long deadline;

	/* Switch the link to the CDS phase of 128b/132b training. */
	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
			       DP_TRAINING_PATTERN_2_CDS) != 1) {
		lt_err(intel_dp, DP_PHY_DPRX, "Failed to start 128b/132b TPS2 CDS\n");
		return false;
	}

	/* Time budget for the LANEx_CDS_DONE Sequence: 20 ms per hop. */
	deadline = jiffies + msecs_to_jiffies_timeout((lttpr_count + 1) * 20);

	/* Poll the link status every 2-3 ms until done or out of budget. */
	for (;;) {
		bool timeout = false;

		if (time_after(jiffies, deadline))
			timeout = true; /* try one last time after deadline */

		usleep_range(2000, 3000);

		if (drm_dp_dpcd_read_link_status(&intel_dp->aux, link_status) < 0) {
			lt_err(intel_dp, DP_PHY_DPRX, "Failed to read link status\n");
			return false;
		}

		if (drm_dp_128b132b_eq_interlane_align_done(link_status) &&
		    drm_dp_128b132b_cds_interlane_align_done(link_status) &&
		    drm_dp_128b132b_lane_symbol_locked(link_status, crtc_state->lane_count)) {
			lt_dbg(intel_dp, DP_PHY_DPRX, "CDS interlane align done\n");
			break;
		}

		if (drm_dp_128b132b_link_training_failed(link_status)) {
			intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
			lt_err(intel_dp, DP_PHY_DPRX, "Downstream link training failure\n");
			return false;
		}

		if (timeout) {
			intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
			lt_err(intel_dp, DP_PHY_DPRX, "CDS timeout\n");
			return false;
		}
	}

	return true;
}
1579 
/*
 * 128b/132b link training sequence. (DP 2.0 E11 SCR on link training.)
 *
 * Runs the LANEx_EQ_DONE and LANEx_CDS_DONE sequences and returns whether
 * both passed.
 */
static bool
intel_dp_128b132b_link_train(struct intel_dp *intel_dp,
			     const struct intel_crtc_state *crtc_state,
			     int lttpr_count)
{
	bool passed = false;
	int ret;

	/*
	 * Wait (up to 500 ms, polling every 500 us) for any pending intra-hop
	 * AUX reply indication to clear before starting the sequence.
	 */
	ret = poll_timeout_us(ret = intel_dp_128b132b_intra_hop(intel_dp, crtc_state),
			      ret == 0,
			      500, 500 * 1000, false);
	if (ret) {
		lt_err(intel_dp, DP_PHY_DPRX, "128b/132b intra-hop not clear\n");
		goto out;
	}

	/* LANEx_EQ_DONE sequence first, then the LANEx_CDS_DONE sequence. */
	if (intel_dp_128b132b_lane_eq(intel_dp, crtc_state) &&
	    intel_dp_128b132b_lane_cds(intel_dp, crtc_state, lttpr_count))
		passed = true;

	lt_dbg(intel_dp, DP_PHY_DPRX,
	       "128b/132b Link Training %s at link rate = %d, lane count = %d\n",
	       passed ? "passed" : "failed",
	       crtc_state->port_clock, crtc_state->lane_count);

out:
	/*
	 * Ensure that the training pattern does get set to TPS2 even in case
	 * of a failure, as is the case at the end of a passing link training
	 * and what is expected by the transcoder. Leaving TPS1 set (and
	 * disabling the link train mode in DP_TP_CTL later from TPS1 directly)
	 * would result in a stuck transcoder HW state and flip-done timeouts
	 * later in the modeset sequence.
	 */
	if (!passed)
		intel_dp_program_link_training_pattern(intel_dp, crtc_state,
						       DP_PHY_DPRX, DP_TRAINING_PATTERN_2);

	intel_dp_disable_dpcd_training_pattern(intel_dp, DP_PHY_DPRX);

	return passed;
}
1625 
/**
 * intel_dp_start_link_train - start link training
 * @state: Atomic state
 * @intel_dp: DP struct
 * @crtc_state: state for CRTC attached to the encoder
 *
 * Start the link training of the @intel_dp port, scheduling a fallback
 * retraining with reduced link rate/lane parameters if the link training
 * fails.
 * After calling this function intel_dp_stop_link_train() must be called.
 */
void intel_dp_start_link_train(struct intel_atomic_state *state,
			       struct intel_dp *intel_dp,
			       const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	bool passed;
	/*
	 * Reinit the LTTPRs here to ensure that they are switched to
	 * non-transparent mode. During an earlier LTTPR detection this
	 * could've been prevented by an active link.
	 */
	int lttpr_count;

	/* Unblocked again in intel_dp_stop_link_train(). */
	intel_hpd_block(encoder);

	lttpr_count = intel_dp_init_lttpr_and_dprx_caps(intel_dp);

	if (lttpr_count < 0)
		/* Still continue with enabling the port and link training. */
		lttpr_count = 0;

	intel_dp_prepare_link_train(intel_dp, crtc_state);

	/* 128b/132b (UHBR) and 8b/10b links use distinct training sequences. */
	if (intel_dp_is_uhbr(crtc_state))
		passed = intel_dp_128b132b_link_train(intel_dp, crtc_state, lttpr_count);
	else
		passed = intel_dp_link_train_all_phys(intel_dp, crtc_state, lttpr_count);

	/* Debug facility: treat the training as failed while the countdown runs. */
	if (intel_dp->link.force_train_failure) {
		intel_dp->link.force_train_failure--;
		lt_dbg(intel_dp, DP_PHY_DPRX, "Forcing link training failure\n");
	} else if (passed) {
		intel_dp->link.seq_train_failures = 0;
		return;
	}

	intel_dp->link.seq_train_failures++;

	/*
	 * Ignore the link failure in CI
	 *
	 * In fixed environments like CI, sometimes unexpected long HPDs are
	 * generated by the displays. If ignore_long_hpd flag is set, such long
	 * HPDs are ignored. And probably as a consequence of these ignored
	 * long HPDs, subsequent link trainings are failed resulting into CI
	 * execution failures.
	 *
	 * For test cases which rely on the link training or processing of HPDs
	 * ignore_long_hpd flag can unset from the testcase.
	 */
	if (display->hotplug.ignore_long_hpd) {
		lt_dbg(intel_dp, DP_PHY_DPRX, "Ignore the link failure\n");
		return;
	}

	/* Allow a few same-parameter retries before falling back. */
	if (intel_dp->link.seq_train_failures < MAX_SEQ_TRAIN_FAILURES)
		return;

	if (intel_dp_schedule_fallback_link_training(state, intel_dp, crtc_state))
		return;

	/* No fallback possible; stop retraining with the failing parameters. */
	intel_dp->link.retrain_disabled = true;

	if (!passed)
		lt_err(intel_dp, DP_PHY_DPRX, "Can't reduce link training parameters after failure\n");
	else
		lt_dbg(intel_dp, DP_PHY_DPRX, "Can't reduce link training parameters after forced failure\n");
}
1707 
1708 void intel_dp_128b132b_sdp_crc16(struct intel_dp *intel_dp,
1709 				 const struct intel_crtc_state *crtc_state)
1710 {
1711 	/*
1712 	 * VIDEO_DIP_CTL register bit 31 should be set to '0' to not
1713 	 * disable SDP CRC. This is applicable for Display version 13.
1714 	 * Default value of bit 31 is '0' hence discarding the write
1715 	 * TODO: Corrective actions on SDP corruption yet to be defined
1716 	 */
1717 	if (!intel_dp_is_uhbr(crtc_state))
1718 		return;
1719 
1720 	/* DP v2.0 SCR on SDP CRC16 for 128b/132b Link Layer */
1721 	drm_dp_dpcd_writeb(&intel_dp->aux,
1722 			   DP_SDP_ERROR_DETECTION_CONFIGURATION,
1723 			   DP_SDP_CRC16_128B132B_EN);
1724 
1725 	lt_dbg(intel_dp, DP_PHY_DPRX, "DP2.0 SDP CRC16 for 128b/132b enabled\n");
1726 }
1727 
1728 static int i915_dp_force_link_rate_show(struct seq_file *m, void *data)
1729 {
1730 	struct intel_connector *connector = to_intel_connector(m->private);
1731 	struct intel_display *display = to_intel_display(connector);
1732 	struct intel_dp *intel_dp = intel_attached_dp(connector);
1733 	int current_rate = -1;
1734 	int force_rate;
1735 	int err;
1736 	int i;
1737 
1738 	err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
1739 	if (err)
1740 		return err;
1741 
1742 	if (intel_dp->link.active)
1743 		current_rate = intel_dp->link_rate;
1744 	force_rate = intel_dp->link.force_rate;
1745 
1746 	drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
1747 
1748 	seq_printf(m, "%sauto%s",
1749 		   force_rate == 0 ? "[" : "",
1750 		   force_rate == 0 ? "]" : "");
1751 
1752 	for (i = 0; i < intel_dp->num_source_rates; i++)
1753 		seq_printf(m, " %s%d%s%s",
1754 			   intel_dp->source_rates[i] == force_rate ? "[" : "",
1755 			   intel_dp->source_rates[i],
1756 			   intel_dp->source_rates[i] == current_rate ? "*" : "",
1757 			   intel_dp->source_rates[i] == force_rate ? "]" : "");
1758 
1759 	seq_putc(m, '\n');
1760 
1761 	return 0;
1762 }
1763 
1764 static int parse_link_rate(struct intel_dp *intel_dp, const char __user *ubuf, size_t len)
1765 {
1766 	char *kbuf;
1767 	const char *p;
1768 	int rate;
1769 	int ret = 0;
1770 
1771 	kbuf = memdup_user_nul(ubuf, len);
1772 	if (IS_ERR(kbuf))
1773 		return PTR_ERR(kbuf);
1774 
1775 	p = strim(kbuf);
1776 
1777 	if (!strcmp(p, "auto")) {
1778 		rate = 0;
1779 	} else {
1780 		ret = kstrtoint(p, 0, &rate);
1781 		if (ret < 0)
1782 			goto out_free;
1783 
1784 		if (intel_dp_rate_index(intel_dp->source_rates,
1785 					intel_dp->num_source_rates,
1786 					rate) < 0)
1787 			ret = -EINVAL;
1788 	}
1789 
1790 out_free:
1791 	kfree(kbuf);
1792 
1793 	return ret < 0 ? ret : rate;
1794 }
1795 
1796 static ssize_t i915_dp_force_link_rate_write(struct file *file,
1797 					     const char __user *ubuf,
1798 					     size_t len, loff_t *offp)
1799 {
1800 	struct seq_file *m = file->private_data;
1801 	struct intel_connector *connector = to_intel_connector(m->private);
1802 	struct intel_display *display = to_intel_display(connector);
1803 	struct intel_dp *intel_dp = intel_attached_dp(connector);
1804 	int rate;
1805 	int err;
1806 
1807 	rate = parse_link_rate(intel_dp, ubuf, len);
1808 	if (rate < 0)
1809 		return rate;
1810 
1811 	err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
1812 	if (err)
1813 		return err;
1814 
1815 	intel_dp_reset_link_params(intel_dp);
1816 	intel_dp->link.force_rate = rate;
1817 
1818 	drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
1819 
1820 	*offp += len;
1821 
1822 	return len;
1823 }
1824 DEFINE_SHOW_STORE_ATTRIBUTE(i915_dp_force_link_rate);
1825 
1826 static int i915_dp_force_lane_count_show(struct seq_file *m, void *data)
1827 {
1828 	struct intel_connector *connector = to_intel_connector(m->private);
1829 	struct intel_display *display = to_intel_display(connector);
1830 	struct intel_dp *intel_dp = intel_attached_dp(connector);
1831 	int current_lane_count = -1;
1832 	int force_lane_count;
1833 	int err;
1834 	int i;
1835 
1836 	err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
1837 	if (err)
1838 		return err;
1839 
1840 	if (intel_dp->link.active)
1841 		current_lane_count = intel_dp->lane_count;
1842 	force_lane_count = intel_dp->link.force_lane_count;
1843 
1844 	drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
1845 
1846 	seq_printf(m, "%sauto%s",
1847 		   force_lane_count == 0 ? "[" : "",
1848 		   force_lane_count == 0 ? "]" : "");
1849 
1850 	for (i = 1; i <= 4; i <<= 1)
1851 		seq_printf(m, " %s%d%s%s",
1852 			   i == force_lane_count ? "[" : "",
1853 			   i,
1854 			   i == current_lane_count ? "*" : "",
1855 			   i == force_lane_count ? "]" : "");
1856 
1857 	seq_putc(m, '\n');
1858 
1859 	return 0;
1860 }
1861 
1862 static int parse_lane_count(const char __user *ubuf, size_t len)
1863 {
1864 	char *kbuf;
1865 	const char *p;
1866 	int lane_count;
1867 	int ret = 0;
1868 
1869 	kbuf = memdup_user_nul(ubuf, len);
1870 	if (IS_ERR(kbuf))
1871 		return PTR_ERR(kbuf);
1872 
1873 	p = strim(kbuf);
1874 
1875 	if (!strcmp(p, "auto")) {
1876 		lane_count = 0;
1877 	} else {
1878 		ret = kstrtoint(p, 0, &lane_count);
1879 		if (ret < 0)
1880 			goto out_free;
1881 
1882 		switch (lane_count) {
1883 		case 1:
1884 		case 2:
1885 		case 4:
1886 			break;
1887 		default:
1888 			ret = -EINVAL;
1889 		}
1890 	}
1891 
1892 out_free:
1893 	kfree(kbuf);
1894 
1895 	return ret < 0 ? ret : lane_count;
1896 }
1897 
1898 static ssize_t i915_dp_force_lane_count_write(struct file *file,
1899 					      const char __user *ubuf,
1900 					      size_t len, loff_t *offp)
1901 {
1902 	struct seq_file *m = file->private_data;
1903 	struct intel_connector *connector = to_intel_connector(m->private);
1904 	struct intel_display *display = to_intel_display(connector);
1905 	struct intel_dp *intel_dp = intel_attached_dp(connector);
1906 	int lane_count;
1907 	int err;
1908 
1909 	lane_count = parse_lane_count(ubuf, len);
1910 	if (lane_count < 0)
1911 		return lane_count;
1912 
1913 	err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
1914 	if (err)
1915 		return err;
1916 
1917 	intel_dp_reset_link_params(intel_dp);
1918 	intel_dp->link.force_lane_count = lane_count;
1919 
1920 	drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
1921 
1922 	*offp += len;
1923 
1924 	return len;
1925 }
1926 DEFINE_SHOW_STORE_ATTRIBUTE(i915_dp_force_lane_count);
1927 
1928 static int i915_dp_max_link_rate_show(void *data, u64 *val)
1929 {
1930 	struct intel_connector *connector = to_intel_connector(data);
1931 	struct intel_display *display = to_intel_display(connector);
1932 	struct intel_dp *intel_dp = intel_attached_dp(connector);
1933 	int err;
1934 
1935 	err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
1936 	if (err)
1937 		return err;
1938 
1939 	*val = intel_dp->link.max_rate;
1940 
1941 	drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
1942 
1943 	return 0;
1944 }
1945 DEFINE_DEBUGFS_ATTRIBUTE(i915_dp_max_link_rate_fops, i915_dp_max_link_rate_show, NULL, "%llu\n");
1946 
1947 static int i915_dp_max_lane_count_show(void *data, u64 *val)
1948 {
1949 	struct intel_connector *connector = to_intel_connector(data);
1950 	struct intel_display *display = to_intel_display(connector);
1951 	struct intel_dp *intel_dp = intel_attached_dp(connector);
1952 	int err;
1953 
1954 	err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
1955 	if (err)
1956 		return err;
1957 
1958 	*val = intel_dp->link.max_lane_count;
1959 
1960 	drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
1961 
1962 	return 0;
1963 }
1964 DEFINE_DEBUGFS_ATTRIBUTE(i915_dp_max_lane_count_fops, i915_dp_max_lane_count_show, NULL, "%llu\n");
1965 
1966 static int i915_dp_force_link_training_failure_show(void *data, u64 *val)
1967 {
1968 	struct intel_connector *connector = to_intel_connector(data);
1969 	struct intel_display *display = to_intel_display(connector);
1970 	struct intel_dp *intel_dp = intel_attached_dp(connector);
1971 	int err;
1972 
1973 	err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
1974 	if (err)
1975 		return err;
1976 
1977 	*val = intel_dp->link.force_train_failure;
1978 
1979 	drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
1980 
1981 	return 0;
1982 }
1983 
1984 static int i915_dp_force_link_training_failure_write(void *data, u64 val)
1985 {
1986 	struct intel_connector *connector = to_intel_connector(data);
1987 	struct intel_display *display = to_intel_display(connector);
1988 	struct intel_dp *intel_dp = intel_attached_dp(connector);
1989 	int err;
1990 
1991 	if (val > 2)
1992 		return -EINVAL;
1993 
1994 	err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
1995 	if (err)
1996 		return err;
1997 
1998 	intel_dp->link.force_train_failure = val;
1999 
2000 	drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
2001 
2002 	return 0;
2003 }
2004 DEFINE_DEBUGFS_ATTRIBUTE(i915_dp_force_link_training_failure_fops,
2005 			 i915_dp_force_link_training_failure_show,
2006 			 i915_dp_force_link_training_failure_write, "%llu\n");
2007 
2008 static int i915_dp_force_link_retrain_show(void *data, u64 *val)
2009 {
2010 	struct intel_connector *connector = to_intel_connector(data);
2011 	struct intel_display *display = to_intel_display(connector);
2012 	struct intel_dp *intel_dp = intel_attached_dp(connector);
2013 	int err;
2014 
2015 	err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
2016 	if (err)
2017 		return err;
2018 
2019 	*val = intel_dp->link.force_retrain;
2020 
2021 	drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
2022 
2023 	return 0;
2024 }
2025 
2026 static int i915_dp_force_link_retrain_write(void *data, u64 val)
2027 {
2028 	struct intel_connector *connector = to_intel_connector(data);
2029 	struct intel_display *display = to_intel_display(connector);
2030 	struct intel_dp *intel_dp = intel_attached_dp(connector);
2031 	int err;
2032 
2033 	err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
2034 	if (err)
2035 		return err;
2036 
2037 	intel_dp->link.force_retrain = val;
2038 
2039 	drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
2040 
2041 	intel_hpd_trigger_irq(dp_to_dig_port(intel_dp));
2042 
2043 	return 0;
2044 }
2045 DEFINE_DEBUGFS_ATTRIBUTE(i915_dp_force_link_retrain_fops,
2046 			 i915_dp_force_link_retrain_show,
2047 			 i915_dp_force_link_retrain_write, "%llu\n");
2048 
2049 static int i915_dp_link_retrain_disabled_show(struct seq_file *m, void *data)
2050 {
2051 	struct intel_connector *connector = to_intel_connector(m->private);
2052 	struct intel_display *display = to_intel_display(connector);
2053 	struct intel_dp *intel_dp = intel_attached_dp(connector);
2054 	int err;
2055 
2056 	err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
2057 	if (err)
2058 		return err;
2059 
2060 	seq_printf(m, "%s\n", str_yes_no(intel_dp->link.retrain_disabled));
2061 
2062 	drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
2063 
2064 	return 0;
2065 }
2066 DEFINE_SHOW_ATTRIBUTE(i915_dp_link_retrain_disabled);
2067 
2068 void intel_dp_link_training_debugfs_add(struct intel_connector *connector)
2069 {
2070 	struct dentry *root = connector->base.debugfs_entry;
2071 
2072 	if (connector->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort &&
2073 	    connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
2074 		return;
2075 
2076 	debugfs_create_file("i915_dp_force_link_rate", 0644, root,
2077 			    connector, &i915_dp_force_link_rate_fops);
2078 
2079 	debugfs_create_file("i915_dp_force_lane_count", 0644, root,
2080 			    connector, &i915_dp_force_lane_count_fops);
2081 
2082 	debugfs_create_file("i915_dp_max_link_rate", 0444, root,
2083 			    connector, &i915_dp_max_link_rate_fops);
2084 
2085 	debugfs_create_file("i915_dp_max_lane_count", 0444, root,
2086 			    connector, &i915_dp_max_lane_count_fops);
2087 
2088 	debugfs_create_file("i915_dp_force_link_training_failure", 0644, root,
2089 			    connector, &i915_dp_force_link_training_failure_fops);
2090 
2091 	debugfs_create_file("i915_dp_force_link_retrain", 0644, root,
2092 			    connector, &i915_dp_force_link_retrain_fops);
2093 
2094 	debugfs_create_file("i915_dp_link_retrain_disabled", 0444, root,
2095 			    connector, &i915_dp_link_retrain_disabled_fops);
2096 }
2097