xref: /linux/drivers/gpu/drm/i915/display/intel_dp_link_training.c (revision 6e7fd890f1d6ac83805409e9c346240de2705584)
1 /*
2  * Copyright © 2008-2015 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23 
24 #include "i915_drv.h"
25 #include "intel_display_types.h"
26 #include "intel_dp.h"
27 #include "intel_dp_link_training.h"
28 #include "intel_encoder.h"
29 #include "intel_hotplug.h"
30 #include "intel_panel.h"
31 
/* Common log prefix identifying the connector, encoder and PHY (DPRX/LTTPR). */
#define LT_MSG_PREFIX			"[CONNECTOR:%d:%s][ENCODER:%d:%s][%s] "
#define LT_MSG_ARGS(_intel_dp, _dp_phy)	(_intel_dp)->attached_connector->base.base.id, \
					(_intel_dp)->attached_connector->base.name, \
					dp_to_dig_port(_intel_dp)->base.base.base.id, \
					dp_to_dig_port(_intel_dp)->base.base.name, \
					drm_dp_phy_name(_dp_phy)

/* Debug-level link training message, tagged with the LT_MSG_PREFIX identity. */
#define lt_dbg(_intel_dp, _dp_phy, _format, ...) \
	drm_dbg_kms(&dp_to_i915(_intel_dp)->drm, \
		    LT_MSG_PREFIX _format, \
		    LT_MSG_ARGS(_intel_dp, _dp_phy), ## __VA_ARGS__)

/*
 * Error-level link training message. If the sink is disconnected the failure
 * is expected rather than actionable, so it is demoted to debug level.
 */
#define lt_err(_intel_dp, _dp_phy, _format, ...) do { \
	if (intel_digital_port_connected(&dp_to_dig_port(_intel_dp)->base)) \
		drm_err(&dp_to_i915(_intel_dp)->drm, \
			LT_MSG_PREFIX _format, \
			LT_MSG_ARGS(_intel_dp, _dp_phy), ## __VA_ARGS__); \
	else \
		lt_dbg(_intel_dp, _dp_phy, "Sink disconnected: " _format, ## __VA_ARGS__); \
} while (0)
52 
53 static void intel_dp_reset_lttpr_common_caps(struct intel_dp *intel_dp)
54 {
55 	memset(intel_dp->lttpr_common_caps, 0, sizeof(intel_dp->lttpr_common_caps));
56 }
57 
58 static void intel_dp_reset_lttpr_count(struct intel_dp *intel_dp)
59 {
60 	intel_dp->lttpr_common_caps[DP_PHY_REPEATER_CNT -
61 				    DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV] = 0;
62 }
63 
64 static u8 *intel_dp_lttpr_phy_caps(struct intel_dp *intel_dp,
65 				   enum drm_dp_phy dp_phy)
66 {
67 	return intel_dp->lttpr_phy_caps[dp_phy - DP_PHY_LTTPR1];
68 }
69 
70 static void intel_dp_read_lttpr_phy_caps(struct intel_dp *intel_dp,
71 					 const u8 dpcd[DP_RECEIVER_CAP_SIZE],
72 					 enum drm_dp_phy dp_phy)
73 {
74 	u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);
75 
76 	if (drm_dp_read_lttpr_phy_caps(&intel_dp->aux, dpcd, dp_phy, phy_caps) < 0) {
77 		lt_dbg(intel_dp, dp_phy, "failed to read the PHY caps\n");
78 		return;
79 	}
80 
81 	lt_dbg(intel_dp, dp_phy, "PHY capabilities: %*ph\n",
82 	       (int)sizeof(intel_dp->lttpr_phy_caps[0]),
83 	       phy_caps);
84 }
85 
86 static bool intel_dp_read_lttpr_common_caps(struct intel_dp *intel_dp,
87 					    const u8 dpcd[DP_RECEIVER_CAP_SIZE])
88 {
89 	int ret;
90 
91 	ret = drm_dp_read_lttpr_common_caps(&intel_dp->aux, dpcd,
92 					    intel_dp->lttpr_common_caps);
93 	if (ret < 0)
94 		goto reset_caps;
95 
96 	lt_dbg(intel_dp, DP_PHY_DPRX, "LTTPR common capabilities: %*ph\n",
97 	       (int)sizeof(intel_dp->lttpr_common_caps),
98 	       intel_dp->lttpr_common_caps);
99 
100 	/* The minimum value of LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV is 1.4 */
101 	if (intel_dp->lttpr_common_caps[0] < 0x14)
102 		goto reset_caps;
103 
104 	return true;
105 
106 reset_caps:
107 	intel_dp_reset_lttpr_common_caps(intel_dp);
108 	return false;
109 }
110 
111 static bool
112 intel_dp_set_lttpr_transparent_mode(struct intel_dp *intel_dp, bool enable)
113 {
114 	u8 val = enable ? DP_PHY_REPEATER_MODE_TRANSPARENT :
115 			  DP_PHY_REPEATER_MODE_NON_TRANSPARENT;
116 
117 	return drm_dp_dpcd_write(&intel_dp->aux, DP_PHY_REPEATER_MODE, &val, 1) == 1;
118 }
119 
120 static bool intel_dp_lttpr_transparent_mode_enabled(struct intel_dp *intel_dp)
121 {
122 	return intel_dp->lttpr_common_caps[DP_PHY_REPEATER_MODE -
123 					   DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV] ==
124 		DP_PHY_REPEATER_MODE_TRANSPARENT;
125 }
126 
/*
 * Read the LTTPR common capabilities and switch the LTTPR PHYs to
 * non-transparent mode if this is supported. Preserve the
 * transparent/non-transparent mode on an active link.
 *
 * Return the number of detected LTTPRs in non-transparent mode or 0 if the
 * LTTPRs are in transparent mode or the detection failed.
 */
static int intel_dp_init_lttpr_phys(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
	int lttpr_count;

	if (!intel_dp_read_lttpr_common_caps(intel_dp, dpcd))
		return 0;

	/* A negative count flags an invalid/unsupported repeater count field. */
	lttpr_count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps);
	/*
	 * Prevent setting LTTPR transparent mode explicitly if no LTTPRs are
	 * detected as this breaks link training at least on the Dell WD19TB
	 * dock.
	 */
	if (lttpr_count == 0)
		return 0;

	/*
	 * Don't change the mode on an active link, to prevent a loss of link
	 * synchronization. See DP Standard v2.0 3.6.7. about the LTTPR
	 * resetting its internal state when the mode is changed from
	 * non-transparent to transparent.
	 */
	if (intel_dp->link_trained) {
		if (lttpr_count < 0 || intel_dp_lttpr_transparent_mode_enabled(intel_dp))
			goto out_reset_lttpr_count;

		return lttpr_count;
	}

	/*
	 * See DP Standard v2.0 3.6.6.1. about the explicit disabling of
	 * non-transparent mode and the disable->enable non-transparent mode
	 * sequence.
	 */
	intel_dp_set_lttpr_transparent_mode(intel_dp, true);

	/*
	 * In case of unsupported number of LTTPRs or failing to switch to
	 * non-transparent mode fall-back to transparent link training mode,
	 * still taking into account any LTTPR common lane- rate/count limits.
	 */
	if (lttpr_count < 0)
		return 0;

	if (!intel_dp_set_lttpr_transparent_mode(intel_dp, false)) {
		lt_dbg(intel_dp, DP_PHY_DPRX,
		       "Switching to LTTPR non-transparent LT mode failed, fall-back to transparent mode\n");

		/* Restore transparent mode so the LTTPRs stay in a known state. */
		intel_dp_set_lttpr_transparent_mode(intel_dp, true);

		goto out_reset_lttpr_count;
	}

	return lttpr_count;

out_reset_lttpr_count:
	intel_dp_reset_lttpr_count(intel_dp);

	return 0;
}
195 
196 static int intel_dp_init_lttpr(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEIVER_CAP_SIZE])
197 {
198 	int lttpr_count;
199 	int i;
200 
201 	lttpr_count = intel_dp_init_lttpr_phys(intel_dp, dpcd);
202 
203 	for (i = 0; i < lttpr_count; i++)
204 		intel_dp_read_lttpr_phy_caps(intel_dp, dpcd, DP_PHY_LTTPR(i));
205 
206 	return lttpr_count;
207 }
208 
209 int intel_dp_read_dprx_caps(struct intel_dp *intel_dp, u8 dpcd[DP_RECEIVER_CAP_SIZE])
210 {
211 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
212 
213 	if (intel_dp_is_edp(intel_dp))
214 		return 0;
215 
216 	/*
217 	 * Detecting LTTPRs must be avoided on platforms with an AUX timeout
218 	 * period < 3.2ms. (see DP Standard v2.0, 2.11.2, 3.6.6.1).
219 	 */
220 	if (DISPLAY_VER(i915) >= 10 && !IS_GEMINILAKE(i915))
221 		if (drm_dp_dpcd_probe(&intel_dp->aux,
222 				      DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV))
223 			return -EIO;
224 
225 	if (drm_dp_read_dpcd_caps(&intel_dp->aux, dpcd))
226 		return -EIO;
227 
228 	return 0;
229 }
230 
/**
 * intel_dp_init_lttpr_and_dprx_caps - detect LTTPR and DPRX caps, init the LTTPR link training mode
 * @intel_dp: Intel DP struct
 *
 * Read the LTTPR common and DPRX capabilities and switch to non-transparent
 * link training mode if any is detected and read the PHY capabilities for all
 * detected LTTPRs. In case of an LTTPR detection error or if the number of
 * LTTPRs is more than is supported (8), fall back to the no-LTTPR,
 * transparent mode link training mode.
 *
 * Returns:
 *   >0  if LTTPRs were detected and the non-transparent LT mode was set. The
 *       DPRX capabilities are read out.
 *    0  if no LTTPRs or more than 8 LTTPRs were detected or in case of a
 *       detection failure and the transparent LT mode was set. The DPRX
 *       capabilities are read out.
 *   <0  Reading out the DPRX capabilities failed.
 */
int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int lttpr_count = 0;

	/*
	 * Detecting LTTPRs must be avoided on platforms with an AUX timeout
	 * period < 3.2ms. (see DP Standard v2.0, 2.11.2, 3.6.6.1).
	 */
	if (!intel_dp_is_edp(intel_dp) &&
	    (DISPLAY_VER(i915) >= 10 && !IS_GEMINILAKE(i915))) {
		/* Scratch buffer: the authoritative caps are re-read below. */
		u8 dpcd[DP_RECEIVER_CAP_SIZE];
		int err = intel_dp_read_dprx_caps(intel_dp, dpcd);

		if (err != 0)
			return err;

		lttpr_count = intel_dp_init_lttpr(intel_dp, dpcd);
	}

	/*
	 * The DPTX shall read the DPRX caps after LTTPR detection, so re-read
	 * it here.
	 */
	if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd)) {
		/* Don't keep stale LTTPR caps around when the DPRX read failed. */
		intel_dp_reset_lttpr_common_caps(intel_dp);
		return -EIO;
	}

	return lttpr_count;
}
280 
281 static u8 dp_voltage_max(u8 preemph)
282 {
283 	switch (preemph & DP_TRAIN_PRE_EMPHASIS_MASK) {
284 	case DP_TRAIN_PRE_EMPH_LEVEL_0:
285 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
286 	case DP_TRAIN_PRE_EMPH_LEVEL_1:
287 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
288 	case DP_TRAIN_PRE_EMPH_LEVEL_2:
289 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_1;
290 	case DP_TRAIN_PRE_EMPH_LEVEL_3:
291 	default:
292 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
293 	}
294 }
295 
296 static u8 intel_dp_lttpr_voltage_max(struct intel_dp *intel_dp,
297 				     enum drm_dp_phy dp_phy)
298 {
299 	const u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);
300 
301 	if (drm_dp_lttpr_voltage_swing_level_3_supported(phy_caps))
302 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
303 	else
304 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
305 }
306 
307 static u8 intel_dp_lttpr_preemph_max(struct intel_dp *intel_dp,
308 				     enum drm_dp_phy dp_phy)
309 {
310 	const u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);
311 
312 	if (drm_dp_lttpr_pre_emphasis_level_3_supported(phy_caps))
313 		return DP_TRAIN_PRE_EMPH_LEVEL_3;
314 	else
315 		return DP_TRAIN_PRE_EMPH_LEVEL_2;
316 }
317 
318 static bool
319 intel_dp_phy_is_downstream_of_source(struct intel_dp *intel_dp,
320 				     enum drm_dp_phy dp_phy)
321 {
322 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
323 	int lttpr_count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps);
324 
325 	drm_WARN_ON_ONCE(&i915->drm, lttpr_count <= 0 && dp_phy != DP_PHY_DPRX);
326 
327 	return lttpr_count <= 0 || dp_phy == DP_PHY_LTTPR(lttpr_count - 1);
328 }
329 
330 static u8 intel_dp_phy_voltage_max(struct intel_dp *intel_dp,
331 				   const struct intel_crtc_state *crtc_state,
332 				   enum drm_dp_phy dp_phy)
333 {
334 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
335 	u8 voltage_max;
336 
337 	/*
338 	 * Get voltage_max from the DPTX_PHY (source or LTTPR) upstream from
339 	 * the DPRX_PHY we train.
340 	 */
341 	if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy))
342 		voltage_max = intel_dp->voltage_max(intel_dp, crtc_state);
343 	else
344 		voltage_max = intel_dp_lttpr_voltage_max(intel_dp, dp_phy + 1);
345 
346 	drm_WARN_ON_ONCE(&i915->drm,
347 			 voltage_max != DP_TRAIN_VOLTAGE_SWING_LEVEL_2 &&
348 			 voltage_max != DP_TRAIN_VOLTAGE_SWING_LEVEL_3);
349 
350 	return voltage_max;
351 }
352 
353 static u8 intel_dp_phy_preemph_max(struct intel_dp *intel_dp,
354 				   enum drm_dp_phy dp_phy)
355 {
356 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
357 	u8 preemph_max;
358 
359 	/*
360 	 * Get preemph_max from the DPTX_PHY (source or LTTPR) upstream from
361 	 * the DPRX_PHY we train.
362 	 */
363 	if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy))
364 		preemph_max = intel_dp->preemph_max(intel_dp);
365 	else
366 		preemph_max = intel_dp_lttpr_preemph_max(intel_dp, dp_phy + 1);
367 
368 	drm_WARN_ON_ONCE(&i915->drm,
369 			 preemph_max != DP_TRAIN_PRE_EMPH_LEVEL_2 &&
370 			 preemph_max != DP_TRAIN_PRE_EMPH_LEVEL_3);
371 
372 	return preemph_max;
373 }
374 
375 static bool has_per_lane_signal_levels(struct intel_dp *intel_dp,
376 				       enum drm_dp_phy dp_phy)
377 {
378 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
379 
380 	return !intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy) ||
381 		DISPLAY_VER(i915) >= 10 || IS_BROXTON(i915);
382 }
383 
384 /* 128b/132b */
385 static u8 intel_dp_get_lane_adjust_tx_ffe_preset(struct intel_dp *intel_dp,
386 						 const struct intel_crtc_state *crtc_state,
387 						 enum drm_dp_phy dp_phy,
388 						 const u8 link_status[DP_LINK_STATUS_SIZE],
389 						 int lane)
390 {
391 	u8 tx_ffe = 0;
392 
393 	if (has_per_lane_signal_levels(intel_dp, dp_phy)) {
394 		lane = min(lane, crtc_state->lane_count - 1);
395 		tx_ffe = drm_dp_get_adjust_tx_ffe_preset(link_status, lane);
396 	} else {
397 		for (lane = 0; lane < crtc_state->lane_count; lane++)
398 			tx_ffe = max(tx_ffe, drm_dp_get_adjust_tx_ffe_preset(link_status, lane));
399 	}
400 
401 	return tx_ffe;
402 }
403 
/*
 * 8b/10b: compute the vswing/pre-emphasis train_set byte for one lane from
 * the sink's adjustment request, clamped to what the upstream PHY supports.
 */
static u8 intel_dp_get_lane_adjust_vswing_preemph(struct intel_dp *intel_dp,
						  const struct intel_crtc_state *crtc_state,
						  enum drm_dp_phy dp_phy,
						  const u8 link_status[DP_LINK_STATUS_SIZE],
						  int lane)
{
	u8 v = 0;
	u8 p = 0;
	u8 voltage_max;
	u8 preemph_max;

	if (has_per_lane_signal_levels(intel_dp, dp_phy)) {
		lane = min(lane, crtc_state->lane_count - 1);

		v = drm_dp_get_adjust_request_voltage(link_status, lane);
		p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
	} else {
		/* Common levels only: take the max request over all lanes. */
		for (lane = 0; lane < crtc_state->lane_count; lane++) {
			v = max(v, drm_dp_get_adjust_request_voltage(link_status, lane));
			p = max(p, drm_dp_get_adjust_request_pre_emphasis(link_status, lane));
		}
	}

	/* Clamp pre-emphasis first: the allowed vswing depends on it. */
	preemph_max = intel_dp_phy_preemph_max(intel_dp, dp_phy);
	if (p >= preemph_max)
		p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;

	v = min(v, dp_voltage_max(p));

	voltage_max = intel_dp_phy_voltage_max(intel_dp, crtc_state, dp_phy);
	if (v >= voltage_max)
		v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;

	return v | p;
}
440 
441 static u8 intel_dp_get_lane_adjust_train(struct intel_dp *intel_dp,
442 					 const struct intel_crtc_state *crtc_state,
443 					 enum drm_dp_phy dp_phy,
444 					 const u8 link_status[DP_LINK_STATUS_SIZE],
445 					 int lane)
446 {
447 	if (intel_dp_is_uhbr(crtc_state))
448 		return intel_dp_get_lane_adjust_tx_ffe_preset(intel_dp, crtc_state,
449 							      dp_phy, link_status, lane);
450 	else
451 		return intel_dp_get_lane_adjust_vswing_preemph(intel_dp, crtc_state,
452 							       dp_phy, link_status, lane);
453 }
454 
/* Log format for one value per lane (always 4 lanes). */
#define TRAIN_REQ_FMT "%d/%d/%d/%d"
/* Requested voltage swing level for one lane, from the DPCD link status. */
#define _TRAIN_REQ_VSWING_ARGS(link_status, lane) \
	(drm_dp_get_adjust_request_voltage((link_status), (lane)) >> DP_TRAIN_VOLTAGE_SWING_SHIFT)
/* All 4 lanes' voltage swing requests, for use with TRAIN_REQ_FMT. */
#define TRAIN_REQ_VSWING_ARGS(link_status) \
	_TRAIN_REQ_VSWING_ARGS(link_status, 0), \
	_TRAIN_REQ_VSWING_ARGS(link_status, 1), \
	_TRAIN_REQ_VSWING_ARGS(link_status, 2), \
	_TRAIN_REQ_VSWING_ARGS(link_status, 3)
/* Requested pre-emphasis level for one lane, from the DPCD link status. */
#define _TRAIN_REQ_PREEMPH_ARGS(link_status, lane) \
	(drm_dp_get_adjust_request_pre_emphasis((link_status), (lane)) >> DP_TRAIN_PRE_EMPHASIS_SHIFT)
/* All 4 lanes' pre-emphasis requests, for use with TRAIN_REQ_FMT. */
#define TRAIN_REQ_PREEMPH_ARGS(link_status) \
	_TRAIN_REQ_PREEMPH_ARGS(link_status, 0), \
	_TRAIN_REQ_PREEMPH_ARGS(link_status, 1), \
	_TRAIN_REQ_PREEMPH_ARGS(link_status, 2), \
	_TRAIN_REQ_PREEMPH_ARGS(link_status, 3)
/* Requested 128b/132b TX FFE preset for one lane. */
#define _TRAIN_REQ_TX_FFE_ARGS(link_status, lane) \
	drm_dp_get_adjust_tx_ffe_preset((link_status), (lane))
/* All 4 lanes' TX FFE preset requests, for use with TRAIN_REQ_FMT. */
#define TRAIN_REQ_TX_FFE_ARGS(link_status) \
	_TRAIN_REQ_TX_FFE_ARGS(link_status, 0), \
	_TRAIN_REQ_TX_FFE_ARGS(link_status, 1), \
	_TRAIN_REQ_TX_FFE_ARGS(link_status, 2), \
	_TRAIN_REQ_TX_FFE_ARGS(link_status, 3)
477 
478 void
479 intel_dp_get_adjust_train(struct intel_dp *intel_dp,
480 			  const struct intel_crtc_state *crtc_state,
481 			  enum drm_dp_phy dp_phy,
482 			  const u8 link_status[DP_LINK_STATUS_SIZE])
483 {
484 	int lane;
485 
486 	if (intel_dp_is_uhbr(crtc_state)) {
487 		lt_dbg(intel_dp, dp_phy,
488 		       "128b/132b, lanes: %d, "
489 		       "TX FFE request: " TRAIN_REQ_FMT "\n",
490 		       crtc_state->lane_count,
491 		       TRAIN_REQ_TX_FFE_ARGS(link_status));
492 	} else {
493 		lt_dbg(intel_dp, dp_phy,
494 		       "8b/10b, lanes: %d, "
495 		       "vswing request: " TRAIN_REQ_FMT ", "
496 		       "pre-emphasis request: " TRAIN_REQ_FMT "\n",
497 		       crtc_state->lane_count,
498 		       TRAIN_REQ_VSWING_ARGS(link_status),
499 		       TRAIN_REQ_PREEMPH_ARGS(link_status));
500 	}
501 
502 	for (lane = 0; lane < 4; lane++)
503 		intel_dp->train_set[lane] =
504 			intel_dp_get_lane_adjust_train(intel_dp, crtc_state,
505 						       dp_phy, link_status, lane);
506 }
507 
508 static int intel_dp_training_pattern_set_reg(struct intel_dp *intel_dp,
509 					     enum drm_dp_phy dp_phy)
510 {
511 	return dp_phy == DP_PHY_DPRX ?
512 		DP_TRAINING_PATTERN_SET :
513 		DP_TRAINING_PATTERN_SET_PHY_REPEATER(dp_phy);
514 }
515 
516 static bool
517 intel_dp_set_link_train(struct intel_dp *intel_dp,
518 			const struct intel_crtc_state *crtc_state,
519 			enum drm_dp_phy dp_phy,
520 			u8 dp_train_pat)
521 {
522 	int reg = intel_dp_training_pattern_set_reg(intel_dp, dp_phy);
523 	u8 buf[sizeof(intel_dp->train_set) + 1];
524 	int len;
525 
526 	intel_dp_program_link_training_pattern(intel_dp, crtc_state,
527 					       dp_phy, dp_train_pat);
528 
529 	buf[0] = dp_train_pat;
530 	/* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
531 	memcpy(buf + 1, intel_dp->train_set, crtc_state->lane_count);
532 	len = crtc_state->lane_count + 1;
533 
534 	return drm_dp_dpcd_write(&intel_dp->aux, reg, buf, len) == len;
535 }
536 
537 static char dp_training_pattern_name(u8 train_pat)
538 {
539 	switch (train_pat) {
540 	case DP_TRAINING_PATTERN_1:
541 	case DP_TRAINING_PATTERN_2:
542 	case DP_TRAINING_PATTERN_3:
543 		return '0' + train_pat;
544 	case DP_TRAINING_PATTERN_4:
545 		return '4';
546 	default:
547 		MISSING_CASE(train_pat);
548 		return '?';
549 	}
550 }
551 
552 void
553 intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
554 				       const struct intel_crtc_state *crtc_state,
555 				       enum drm_dp_phy dp_phy,
556 				       u8 dp_train_pat)
557 {
558 	u8 train_pat = intel_dp_training_pattern_symbol(dp_train_pat);
559 
560 	if (train_pat != DP_TRAINING_PATTERN_DISABLE)
561 		lt_dbg(intel_dp, dp_phy, "Using DP training pattern TPS%c\n",
562 		       dp_training_pattern_name(train_pat));
563 
564 	intel_dp->set_link_train(intel_dp, crtc_state, dp_train_pat);
565 }
566 
/* Log format for one level plus "(max)" marker per lane (4 lanes). */
#define TRAIN_SET_FMT "%d%s/%d%s/%d%s/%d%s"
/* One lane's programmed vswing level and whether max swing was reached. */
#define _TRAIN_SET_VSWING_ARGS(train_set) \
	((train_set) & DP_TRAIN_VOLTAGE_SWING_MASK) >> DP_TRAIN_VOLTAGE_SWING_SHIFT, \
	(train_set) & DP_TRAIN_MAX_SWING_REACHED ? "(max)" : ""
/* All 4 lanes' vswing levels, for use with TRAIN_SET_FMT. */
#define TRAIN_SET_VSWING_ARGS(train_set) \
	_TRAIN_SET_VSWING_ARGS((train_set)[0]), \
	_TRAIN_SET_VSWING_ARGS((train_set)[1]), \
	_TRAIN_SET_VSWING_ARGS((train_set)[2]), \
	_TRAIN_SET_VSWING_ARGS((train_set)[3])
/* One lane's programmed pre-emphasis level and whether max was reached. */
#define _TRAIN_SET_PREEMPH_ARGS(train_set) \
	((train_set) & DP_TRAIN_PRE_EMPHASIS_MASK) >> DP_TRAIN_PRE_EMPHASIS_SHIFT, \
	(train_set) & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ? "(max)" : ""
/* All 4 lanes' pre-emphasis levels, for use with TRAIN_SET_FMT. */
#define TRAIN_SET_PREEMPH_ARGS(train_set) \
	_TRAIN_SET_PREEMPH_ARGS((train_set)[0]), \
	_TRAIN_SET_PREEMPH_ARGS((train_set)[1]), \
	_TRAIN_SET_PREEMPH_ARGS((train_set)[2]), \
	_TRAIN_SET_PREEMPH_ARGS((train_set)[3])
/* One lane's TX FFE preset; empty string fills TRAIN_SET_FMT's %s slot. */
#define _TRAIN_SET_TX_FFE_ARGS(train_set) \
	((train_set) & DP_TX_FFE_PRESET_VALUE_MASK), ""
/* All 4 lanes' TX FFE presets, for use with TRAIN_SET_FMT. */
#define TRAIN_SET_TX_FFE_ARGS(train_set) \
	_TRAIN_SET_TX_FFE_ARGS((train_set)[0]), \
	_TRAIN_SET_TX_FFE_ARGS((train_set)[1]), \
	_TRAIN_SET_TX_FFE_ARGS((train_set)[2]), \
	_TRAIN_SET_TX_FFE_ARGS((train_set)[3])
591 
592 void intel_dp_set_signal_levels(struct intel_dp *intel_dp,
593 				const struct intel_crtc_state *crtc_state,
594 				enum drm_dp_phy dp_phy)
595 {
596 	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
597 
598 	if (intel_dp_is_uhbr(crtc_state)) {
599 		lt_dbg(intel_dp, dp_phy,
600 		       "128b/132b, lanes: %d, "
601 		       "TX FFE presets: " TRAIN_SET_FMT "\n",
602 		       crtc_state->lane_count,
603 		       TRAIN_SET_TX_FFE_ARGS(intel_dp->train_set));
604 	} else {
605 		lt_dbg(intel_dp, dp_phy,
606 		       "8b/10b, lanes: %d, "
607 		       "vswing levels: " TRAIN_SET_FMT ", "
608 		       "pre-emphasis levels: " TRAIN_SET_FMT "\n",
609 		       crtc_state->lane_count,
610 		       TRAIN_SET_VSWING_ARGS(intel_dp->train_set),
611 		       TRAIN_SET_PREEMPH_ARGS(intel_dp->train_set));
612 	}
613 
614 	if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy))
615 		encoder->set_signal_levels(encoder, crtc_state);
616 }
617 
618 static bool
619 intel_dp_reset_link_train(struct intel_dp *intel_dp,
620 			  const struct intel_crtc_state *crtc_state,
621 			  enum drm_dp_phy dp_phy,
622 			  u8 dp_train_pat)
623 {
624 	memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
625 	intel_dp_set_signal_levels(intel_dp, crtc_state, dp_phy);
626 	return intel_dp_set_link_train(intel_dp, crtc_state, dp_phy, dp_train_pat);
627 }
628 
629 static bool
630 intel_dp_update_link_train(struct intel_dp *intel_dp,
631 			   const struct intel_crtc_state *crtc_state,
632 			   enum drm_dp_phy dp_phy)
633 {
634 	int reg = dp_phy == DP_PHY_DPRX ?
635 			    DP_TRAINING_LANE0_SET :
636 			    DP_TRAINING_LANE0_SET_PHY_REPEATER(dp_phy);
637 	int ret;
638 
639 	intel_dp_set_signal_levels(intel_dp, crtc_state, dp_phy);
640 
641 	ret = drm_dp_dpcd_write(&intel_dp->aux, reg,
642 				intel_dp->train_set, crtc_state->lane_count);
643 
644 	return ret == crtc_state->lane_count;
645 }
646 
647 /* 128b/132b */
648 static bool intel_dp_lane_max_tx_ffe_reached(u8 train_set_lane)
649 {
650 	return (train_set_lane & DP_TX_FFE_PRESET_VALUE_MASK) ==
651 		DP_TX_FFE_PRESET_VALUE_MASK;
652 }
653 
654 /*
655  * 8b/10b
656  *
657  * FIXME: The DP spec is very confusing here, also the Link CTS spec seems to
658  * have self contradicting tests around this area.
659  *
660  * In lieu of better ideas let's just stop when we've reached the max supported
661  * vswing with its max pre-emphasis, which is either 2+1 or 3+0 depending on
662  * whether vswing level 3 is supported or not.
663  */
664 static bool intel_dp_lane_max_vswing_reached(u8 train_set_lane)
665 {
666 	u8 v = (train_set_lane & DP_TRAIN_VOLTAGE_SWING_MASK) >>
667 		DP_TRAIN_VOLTAGE_SWING_SHIFT;
668 	u8 p = (train_set_lane & DP_TRAIN_PRE_EMPHASIS_MASK) >>
669 		DP_TRAIN_PRE_EMPHASIS_SHIFT;
670 
671 	if ((train_set_lane & DP_TRAIN_MAX_SWING_REACHED) == 0)
672 		return false;
673 
674 	if (v + p != 3)
675 		return false;
676 
677 	return true;
678 }
679 
680 static bool intel_dp_link_max_vswing_reached(struct intel_dp *intel_dp,
681 					     const struct intel_crtc_state *crtc_state)
682 {
683 	int lane;
684 
685 	for (lane = 0; lane < crtc_state->lane_count; lane++) {
686 		u8 train_set_lane = intel_dp->train_set[lane];
687 
688 		if (intel_dp_is_uhbr(crtc_state)) {
689 			if (!intel_dp_lane_max_tx_ffe_reached(train_set_lane))
690 				return false;
691 		} else {
692 			if (!intel_dp_lane_max_vswing_reached(train_set_lane))
693 				return false;
694 		}
695 	}
696 
697 	return true;
698 }
699 
700 static void
701 intel_dp_update_downspread_ctrl(struct intel_dp *intel_dp,
702 				const struct intel_crtc_state *crtc_state)
703 {
704 	u8 link_config[2];
705 
706 	link_config[0] = crtc_state->vrr.flipline ? DP_MSA_TIMING_PAR_IGNORE_EN : 0;
707 	link_config[1] = intel_dp_is_uhbr(crtc_state) ?
708 			 DP_SET_ANSI_128B132B : DP_SET_ANSI_8B10B;
709 	drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
710 }
711 
712 static void
713 intel_dp_update_link_bw_set(struct intel_dp *intel_dp,
714 			    const struct intel_crtc_state *crtc_state,
715 			    u8 link_bw, u8 rate_select)
716 {
717 	u8 lane_count = crtc_state->lane_count;
718 
719 	if (crtc_state->enhanced_framing)
720 		lane_count |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
721 
722 	if (link_bw) {
723 		/* DP and eDP v1.3 and earlier link bw set method. */
724 		u8 link_config[] = { link_bw, lane_count };
725 
726 		drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config,
727 				  ARRAY_SIZE(link_config));
728 	} else {
729 		/*
730 		 * eDP v1.4 and later link rate set method.
731 		 *
732 		 * eDP v1.4x sinks shall ignore DP_LINK_RATE_SET if
733 		 * DP_LINK_BW_SET is set. Avoid writing DP_LINK_BW_SET.
734 		 *
735 		 * eDP v1.5 sinks allow choosing either, and the last choice
736 		 * shall be active.
737 		 */
738 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_LANE_COUNT_SET, lane_count);
739 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_LINK_RATE_SET, rate_select);
740 	}
741 }
742 
/*
 * Prepare link training by configuring the link parameters. On DDI platforms
 * also enable the port here.
 *
 * Always returns true; kept bool for symmetry with the other LT steps.
 */
static bool
intel_dp_prepare_link_train(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *crtc_state)
{
	u8 link_bw, rate_select;

	if (intel_dp->prepare_link_retrain)
		intel_dp->prepare_link_retrain(intel_dp, crtc_state);

	/* link_bw == 0 selects the eDP v1.4+ DP_LINK_RATE_SET method below. */
	intel_dp_compute_rate(intel_dp, crtc_state->port_clock,
			      &link_bw, &rate_select);

	/*
	 * WaEdpLinkRateDataReload
	 *
	 * Parade PS8461E MUX (used on various TGL+ laptops) needs
	 * to snoop the link rates reported by the sink when we
	 * use LINK_RATE_SET in order to operate in jitter cleaning
	 * mode (as opposed to redriver mode). Unfortunately it
	 * loses track of the snooped link rates when powered down,
	 * so we need to make it re-snoop often. Without this high
	 * link rates are not stable.
	 */
	if (!link_bw) {
		/* The read data is discarded; the AUX read itself is the fix. */
		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];

		lt_dbg(intel_dp, DP_PHY_DPRX, "Reloading eDP link rates\n");

		drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
				 sink_rates, sizeof(sink_rates));
	}

	if (link_bw)
		lt_dbg(intel_dp, DP_PHY_DPRX, "Using LINK_BW_SET value %02x\n",
		       link_bw);
	else
		lt_dbg(intel_dp, DP_PHY_DPRX,
		       "Using LINK_RATE_SET value %02x\n",
		       rate_select);
	/*
	 * Spec DP2.1 Section 3.5.2.16
	 * Prior to LT DPTX should set 128b/132b DP Channel coding and then set link rate
	 */
	intel_dp_update_downspread_ctrl(intel_dp, crtc_state);
	intel_dp_update_link_bw_set(intel_dp, crtc_state, link_bw,
				    rate_select);

	return true;
}
796 
797 static bool intel_dp_adjust_request_changed(const struct intel_crtc_state *crtc_state,
798 					    const u8 old_link_status[DP_LINK_STATUS_SIZE],
799 					    const u8 new_link_status[DP_LINK_STATUS_SIZE])
800 {
801 	int lane;
802 
803 	for (lane = 0; lane < crtc_state->lane_count; lane++) {
804 		u8 old, new;
805 
806 		if (intel_dp_is_uhbr(crtc_state)) {
807 			old = drm_dp_get_adjust_tx_ffe_preset(old_link_status, lane);
808 			new = drm_dp_get_adjust_tx_ffe_preset(new_link_status, lane);
809 		} else {
810 			old = drm_dp_get_adjust_request_voltage(old_link_status, lane) |
811 				drm_dp_get_adjust_request_pre_emphasis(old_link_status, lane);
812 			new = drm_dp_get_adjust_request_voltage(new_link_status, lane) |
813 				drm_dp_get_adjust_request_pre_emphasis(new_link_status, lane);
814 		}
815 
816 		if (old != new)
817 			return true;
818 	}
819 
820 	return false;
821 }
822 
/*
 * Dump the raw DPCD link status bytes (per-lane status, alignment, sink
 * status and adjustment requests) at debug level for the given PHY.
 */
void
intel_dp_dump_link_status(struct intel_dp *intel_dp, enum drm_dp_phy dp_phy,
			  const u8 link_status[DP_LINK_STATUS_SIZE])
{
	lt_dbg(intel_dp, dp_phy,
	       "ln0_1:0x%x ln2_3:0x%x align:0x%x sink:0x%x adj_req0_1:0x%x adj_req2_3:0x%x\n",
	       link_status[0], link_status[1], link_status[2],
	       link_status[3], link_status[4], link_status[5]);
}
832 
/*
 * Perform the link training clock recovery phase on the given DP PHY using
 * training pattern 1.
 *
 * Returns true once the sink reports clock recovery on all active lanes;
 * false if training gives up (same voltage tried 5 times, max swing reached,
 * retry limit hit, or an AUX/programming failure).
 */
static bool
intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp,
				      const struct intel_crtc_state *crtc_state,
				      enum drm_dp_phy dp_phy)
{
	u8 old_link_status[DP_LINK_STATUS_SIZE] = {};
	int voltage_tries, cr_tries, max_cr_tries;
	u8 link_status[DP_LINK_STATUS_SIZE];
	bool max_vswing_reached = false;
	int delay_us;

	/* Per-iteration wait mandated before re-reading the link status. */
	delay_us = drm_dp_read_clock_recovery_delay(&intel_dp->aux,
						    intel_dp->dpcd, dp_phy,
						    intel_dp_is_uhbr(crtc_state));

	/* clock recovery */
	if (!intel_dp_reset_link_train(intel_dp, crtc_state, dp_phy,
				       DP_TRAINING_PATTERN_1 |
				       DP_LINK_SCRAMBLING_DISABLE)) {
		lt_err(intel_dp, dp_phy, "Failed to enable link training\n");
		return false;
	}

	/*
	 * The DP 1.4 spec defines the max clock recovery retries value
	 * as 10 but for pre-DP 1.4 devices we set a very tolerant
	 * retry limit of 80 (4 voltage levels x 4 preemphasis levels x
	 * x 5 identical voltage retries). Since the previous specs didn't
	 * define a limit and created the possibility of an infinite loop
	 * we want to prevent any sync from triggering that corner case.
	 */
	if (intel_dp->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14)
		max_cr_tries = 10;
	else
		max_cr_tries = 80;

	voltage_tries = 1;
	for (cr_tries = 0; cr_tries < max_cr_tries; ++cr_tries) {
		usleep_range(delay_us, 2 * delay_us);

		if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy,
						     link_status) < 0) {
			lt_err(intel_dp, dp_phy, "Failed to get link status\n");
			return false;
		}

		if (drm_dp_clock_recovery_ok(link_status, crtc_state->lane_count)) {
			lt_dbg(intel_dp, dp_phy, "Clock recovery OK\n");
			return true;
		}

		/* Checked before updating: bail after 5 unchanged requests. */
		if (voltage_tries == 5) {
			intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
			lt_dbg(intel_dp, dp_phy, "Same voltage tried 5 times\n");
			return false;
		}

		/* Set on the previous iteration: max swing already programmed. */
		if (max_vswing_reached) {
			intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
			lt_dbg(intel_dp, dp_phy, "Max Voltage Swing reached\n");
			return false;
		}

		/* Update training set as requested by target */
		intel_dp_get_adjust_train(intel_dp, crtc_state, dp_phy,
					  link_status);
		if (!intel_dp_update_link_train(intel_dp, crtc_state, dp_phy)) {
			lt_err(intel_dp, dp_phy, "Failed to update link training\n");
			return false;
		}

		if (!intel_dp_adjust_request_changed(crtc_state, old_link_status, link_status))
			++voltage_tries;
		else
			voltage_tries = 1;

		memcpy(old_link_status, link_status, sizeof(link_status));

		if (intel_dp_link_max_vswing_reached(intel_dp, crtc_state))
			max_vswing_reached = true;
	}

	intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
	lt_err(intel_dp, dp_phy, "Failed clock recovery %d times, giving up!\n",
	       max_cr_tries);

	return false;
}
925 
926 /*
927  * Pick Training Pattern Sequence (TPS) for channel equalization. 128b/132b TPS2
928  * for UHBR+, TPS4 for HBR3 or for 1.4 devices that support it, TPS3 for HBR2 or
929  * 1.2 devices that support it, TPS2 otherwise.
930  */
931 static u32 intel_dp_training_pattern(struct intel_dp *intel_dp,
932 				     const struct intel_crtc_state *crtc_state,
933 				     enum drm_dp_phy dp_phy)
934 {
935 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
936 	bool source_tps3, sink_tps3, source_tps4, sink_tps4;
937 
938 	/* UHBR+ use separate 128b/132b TPS2 */
939 	if (intel_dp_is_uhbr(crtc_state))
940 		return DP_TRAINING_PATTERN_2;
941 
942 	/*
943 	 * TPS4 support is mandatory for all downstream devices that
944 	 * support HBR3. There are no known eDP panels that support
945 	 * TPS4 as of Feb 2018 as per VESA eDP_v1.4b_E1 specification.
946 	 * LTTPRs must support TPS4.
947 	 */
948 	source_tps4 = intel_dp_source_supports_tps4(i915);
949 	sink_tps4 = dp_phy != DP_PHY_DPRX ||
950 		    drm_dp_tps4_supported(intel_dp->dpcd);
951 	if (source_tps4 && sink_tps4) {
952 		return DP_TRAINING_PATTERN_4;
953 	} else if (crtc_state->port_clock == 810000) {
954 		if (!source_tps4)
955 			lt_dbg(intel_dp, dp_phy,
956 			       "8.1 Gbps link rate without source TPS4 support\n");
957 		if (!sink_tps4)
958 			lt_dbg(intel_dp, dp_phy,
959 			       "8.1 Gbps link rate without sink TPS4 support\n");
960 	}
961 
962 	/*
963 	 * TPS3 support is mandatory for downstream devices that
964 	 * support HBR2. However, not all sinks follow the spec.
965 	 */
966 	source_tps3 = intel_dp_source_supports_tps3(i915);
967 	sink_tps3 = dp_phy != DP_PHY_DPRX ||
968 		    drm_dp_tps3_supported(intel_dp->dpcd);
969 	if (source_tps3 && sink_tps3) {
970 		return  DP_TRAINING_PATTERN_3;
971 	} else if (crtc_state->port_clock >= 540000) {
972 		if (!source_tps3)
973 			lt_dbg(intel_dp, dp_phy,
974 			       ">=5.4/6.48 Gbps link rate without source TPS3 support\n");
975 		if (!sink_tps3)
976 			lt_dbg(intel_dp, dp_phy,
977 			       ">=5.4/6.48 Gbps link rate without sink TPS3 support\n");
978 	}
979 
980 	return DP_TRAINING_PATTERN_2;
981 }
982 
983 /*
984  * Perform the link training channel equalization phase on the given DP PHY
985  * using one of training pattern 2, 3 or 4 depending on the source and
986  * sink capabilities.
987  */
988 static bool
989 intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp,
990 					    const struct intel_crtc_state *crtc_state,
991 					    enum drm_dp_phy dp_phy)
992 {
993 	int tries;
994 	u32 training_pattern;
995 	u8 link_status[DP_LINK_STATUS_SIZE];
996 	bool channel_eq = false;
997 	int delay_us;
998 
999 	delay_us = drm_dp_read_channel_eq_delay(&intel_dp->aux,
1000 						intel_dp->dpcd, dp_phy,
1001 						intel_dp_is_uhbr(crtc_state));
1002 
1003 	training_pattern = intel_dp_training_pattern(intel_dp, crtc_state, dp_phy);
1004 	/* Scrambling is disabled for TPS2/3 and enabled for TPS4 */
1005 	if (training_pattern != DP_TRAINING_PATTERN_4)
1006 		training_pattern |= DP_LINK_SCRAMBLING_DISABLE;
1007 
1008 	/* channel equalization */
1009 	if (!intel_dp_set_link_train(intel_dp, crtc_state, dp_phy,
1010 				     training_pattern)) {
1011 		lt_err(intel_dp, dp_phy, "Failed to start channel equalization\n");
1012 		return false;
1013 	}
1014 
1015 	for (tries = 0; tries < 5; tries++) {
1016 		usleep_range(delay_us, 2 * delay_us);
1017 
1018 		if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy,
1019 						     link_status) < 0) {
1020 			lt_err(intel_dp, dp_phy, "Failed to get link status\n");
1021 			break;
1022 		}
1023 
1024 		/* Make sure clock is still ok */
1025 		if (!drm_dp_clock_recovery_ok(link_status,
1026 					      crtc_state->lane_count)) {
1027 			intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
1028 			lt_dbg(intel_dp, dp_phy,
1029 			       "Clock recovery check failed, cannot continue channel equalization\n");
1030 			break;
1031 		}
1032 
1033 		if (drm_dp_channel_eq_ok(link_status,
1034 					 crtc_state->lane_count)) {
1035 			channel_eq = true;
1036 			lt_dbg(intel_dp, dp_phy, "Channel EQ done. DP Training successful\n");
1037 			break;
1038 		}
1039 
1040 		/* Update training set as requested by target */
1041 		intel_dp_get_adjust_train(intel_dp, crtc_state, dp_phy,
1042 					  link_status);
1043 		if (!intel_dp_update_link_train(intel_dp, crtc_state, dp_phy)) {
1044 			lt_err(intel_dp, dp_phy, "Failed to update link training\n");
1045 			break;
1046 		}
1047 	}
1048 
1049 	/* Try 5 times, else fail and try at lower BW */
1050 	if (tries == 5) {
1051 		intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
1052 		lt_dbg(intel_dp, dp_phy, "Channel equalization failed 5 times\n");
1053 	}
1054 
1055 	return channel_eq;
1056 }
1057 
1058 static bool intel_dp_disable_dpcd_training_pattern(struct intel_dp *intel_dp,
1059 						   enum drm_dp_phy dp_phy)
1060 {
1061 	int reg = intel_dp_training_pattern_set_reg(intel_dp, dp_phy);
1062 	u8 val = DP_TRAINING_PATTERN_DISABLE;
1063 
1064 	return drm_dp_dpcd_write(&intel_dp->aux, reg, &val, 1) == 1;
1065 }
1066 
1067 static int
1068 intel_dp_128b132b_intra_hop(struct intel_dp *intel_dp,
1069 			    const struct intel_crtc_state *crtc_state)
1070 {
1071 	u8 sink_status;
1072 	int ret;
1073 
1074 	ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_STATUS, &sink_status);
1075 	if (ret != 1) {
1076 		lt_dbg(intel_dp, DP_PHY_DPRX, "Failed to read sink status\n");
1077 		return ret < 0 ? ret : -EIO;
1078 	}
1079 
1080 	return sink_status & DP_INTRA_HOP_AUX_REPLY_INDICATION ? 1 : 0;
1081 }
1082 
1083 /**
1084  * intel_dp_stop_link_train - stop link training
1085  * @intel_dp: DP struct
1086  * @crtc_state: state for CRTC attached to the encoder
1087  *
1088  * Stop the link training of the @intel_dp port, disabling the training
1089  * pattern in the sink's DPCD, and disabling the test pattern symbol
1090  * generation on the port.
1091  *
1092  * What symbols are output on the port after this point is
1093  * platform specific: On DDI/VLV/CHV platforms it will be the idle pattern
1094  * with the pipe being disabled, on older platforms it's HW specific if/how an
1095  * idle pattern is generated, as the pipe is already enabled here for those.
1096  *
1097  * This function must be called after intel_dp_start_link_train().
1098  */
1099 void intel_dp_stop_link_train(struct intel_dp *intel_dp,
1100 			      const struct intel_crtc_state *crtc_state)
1101 {
1102 	intel_dp->link_trained = true;
1103 
1104 	intel_dp_disable_dpcd_training_pattern(intel_dp, DP_PHY_DPRX);
1105 	intel_dp_program_link_training_pattern(intel_dp, crtc_state, DP_PHY_DPRX,
1106 					       DP_TRAINING_PATTERN_DISABLE);
1107 
1108 	if (intel_dp_is_uhbr(crtc_state) &&
1109 	    wait_for(intel_dp_128b132b_intra_hop(intel_dp, crtc_state) == 0, 500)) {
1110 		lt_dbg(intel_dp, DP_PHY_DPRX, "128b/132b intra-hop not clearing\n");
1111 	}
1112 }
1113 
1114 static bool
1115 intel_dp_link_train_phy(struct intel_dp *intel_dp,
1116 			const struct intel_crtc_state *crtc_state,
1117 			enum drm_dp_phy dp_phy)
1118 {
1119 	bool ret = false;
1120 
1121 	if (!intel_dp_link_training_clock_recovery(intel_dp, crtc_state, dp_phy))
1122 		goto out;
1123 
1124 	if (!intel_dp_link_training_channel_equalization(intel_dp, crtc_state, dp_phy))
1125 		goto out;
1126 
1127 	ret = true;
1128 
1129 out:
1130 	lt_dbg(intel_dp, dp_phy,
1131 	       "Link Training %s at link rate = %d, lane count = %d\n",
1132 	       ret ? "passed" : "failed",
1133 	       crtc_state->port_clock, crtc_state->lane_count);
1134 
1135 	return ret;
1136 }
1137 
1138 static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
1139 						     int link_rate,
1140 						     u8 lane_count)
1141 {
1142 	/* FIXME figure out what we actually want here */
1143 	const struct drm_display_mode *fixed_mode =
1144 		intel_panel_preferred_fixed_mode(intel_dp->attached_connector);
1145 	int mode_rate, max_rate;
1146 
1147 	mode_rate = intel_dp_link_required(fixed_mode->clock, 18);
1148 	max_rate = intel_dp_max_link_data_rate(intel_dp, link_rate, lane_count);
1149 	if (mode_rate > max_rate)
1150 		return false;
1151 
1152 	return true;
1153 }
1154 
1155 static int reduce_link_rate(struct intel_dp *intel_dp, int current_rate)
1156 {
1157 	int rate_index;
1158 	int new_rate;
1159 
1160 	if (intel_dp->link.force_rate)
1161 		return -1;
1162 
1163 	rate_index = intel_dp_rate_index(intel_dp->common_rates,
1164 					 intel_dp->num_common_rates,
1165 					 current_rate);
1166 
1167 	if (rate_index <= 0)
1168 		return -1;
1169 
1170 	new_rate = intel_dp_common_rate(intel_dp, rate_index - 1);
1171 
1172 	/* TODO: Make switching from UHBR to non-UHBR rates work. */
1173 	if (drm_dp_is_uhbr_rate(current_rate) != drm_dp_is_uhbr_rate(new_rate))
1174 		return -1;
1175 
1176 	return new_rate;
1177 }
1178 
1179 static int reduce_lane_count(struct intel_dp *intel_dp, int current_lane_count)
1180 {
1181 	if (intel_dp->link.force_lane_count)
1182 		return -1;
1183 
1184 	if (current_lane_count == 1)
1185 		return -1;
1186 
1187 	return current_lane_count >> 1;
1188 }
1189 
/*
 * Compute reduced link parameters to retry with after a link training
 * failure: for eDP first retry once with max parameters; otherwise reduce
 * the link rate at the current lane count, and once the lowest rate is
 * reached, reduce the lane count at the max common rate.
 *
 * Returns 0 if a retry should be attempted, -1 if no fallback is left.
 */
static int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
						   const struct intel_crtc_state *crtc_state)
{
	int new_link_rate;
	int new_lane_count;

	if (intel_dp_is_edp(intel_dp) && !intel_dp->use_max_params) {
		lt_dbg(intel_dp, DP_PHY_DPRX,
		       "Retrying Link training for eDP with max parameters\n");
		intel_dp->use_max_params = true;
		return 0;
	}

	new_lane_count = crtc_state->lane_count;
	new_link_rate = reduce_link_rate(intel_dp, crtc_state->port_clock);
	/* Rate reduction exhausted (or forced): drop a lane at the max rate instead. */
	if (new_link_rate < 0) {
		new_lane_count = reduce_lane_count(intel_dp, crtc_state->lane_count);
		new_link_rate = intel_dp_max_common_rate(intel_dp);
	}

	if (new_lane_count < 0)
		return -1;

	/* Don't reduce below what the eDP panel's fixed mode requires. */
	if (intel_dp_is_edp(intel_dp) &&
	    !intel_dp_can_link_train_fallback_for_edp(intel_dp, new_link_rate, new_lane_count)) {
		lt_dbg(intel_dp, DP_PHY_DPRX,
		       "Retrying Link training for eDP with same parameters\n");
		return 0;
	}

	lt_dbg(intel_dp, DP_PHY_DPRX,
	       "Reducing link parameters from %dx%d to %dx%d\n",
	       crtc_state->lane_count, crtc_state->port_clock,
	       new_lane_count, new_link_rate);

	intel_dp->link.max_rate = new_link_rate;
	intel_dp->link.max_lane_count = new_lane_count;

	return 0;
}
1230 
/* NOTE: @state is only valid for MST links and can be %NULL for SST. */
/*
 * React to a link training failure by scheduling a modeset retry with
 * fallback parameters (or with HOBL disabled).
 *
 * Returns true if a retry was scheduled or the failure can be ignored
 * (disconnected sink), false if no further fallback is possible.
 */
static bool intel_dp_schedule_fallback_link_training(struct intel_atomic_state *state,
						     struct intel_dp *intel_dp,
						     const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;

	/* A failure on a disconnected sink needs no retraining. */
	if (!intel_digital_port_connected(&dp_to_dig_port(intel_dp)->base)) {
		lt_dbg(intel_dp, DP_PHY_DPRX, "Link Training failed on disconnected sink.\n");
		return true;
	}

	/* First suspect HOBL; only reduce link parameters if it wasn't active. */
	if (intel_dp->hobl_active) {
		lt_dbg(intel_dp, DP_PHY_DPRX,
		       "Link Training failed with HOBL active, not enabling it from now on\n");
		intel_dp->hobl_failed = true;
	} else if (intel_dp_get_link_train_fallback_values(intel_dp, crtc_state)) {
		return false;
	}

	/* MST retraining requires the atomic state; see NOTE above. */
	if (drm_WARN_ON(&i915->drm,
			intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) &&
			!state))
		return false;

	/* Schedule a Hotplug Uevent to userspace to start modeset */
	intel_dp_queue_modeset_retry_for_link(state, encoder, crtc_state);

	return true;
}
1262 
1263 /* Perform the link training on all LTTPRs and the DPRX on a link. */
1264 static bool
1265 intel_dp_link_train_all_phys(struct intel_dp *intel_dp,
1266 			     const struct intel_crtc_state *crtc_state,
1267 			     int lttpr_count)
1268 {
1269 	bool ret = true;
1270 	int i;
1271 
1272 	for (i = lttpr_count - 1; i >= 0; i--) {
1273 		enum drm_dp_phy dp_phy = DP_PHY_LTTPR(i);
1274 
1275 		ret = intel_dp_link_train_phy(intel_dp, crtc_state, dp_phy);
1276 		intel_dp_disable_dpcd_training_pattern(intel_dp, dp_phy);
1277 
1278 		if (!ret)
1279 			break;
1280 	}
1281 
1282 	if (ret)
1283 		ret = intel_dp_link_train_phy(intel_dp, crtc_state, DP_PHY_DPRX);
1284 
1285 	if (intel_dp->set_idle_link_train)
1286 		intel_dp->set_idle_link_train(intel_dp, crtc_state);
1287 
1288 	return ret;
1289 }
1290 
1291 /*
1292  * 128b/132b DP LANEx_EQ_DONE Sequence (DP 2.0 E11 3.5.2.16.1)
1293  */
1294 static bool
1295 intel_dp_128b132b_lane_eq(struct intel_dp *intel_dp,
1296 			  const struct intel_crtc_state *crtc_state)
1297 {
1298 	u8 link_status[DP_LINK_STATUS_SIZE];
1299 	int delay_us;
1300 	int try, max_tries = 20;
1301 	unsigned long deadline;
1302 	bool timeout = false;
1303 
1304 	/*
1305 	 * Reset signal levels. Start transmitting 128b/132b TPS1.
1306 	 *
1307 	 * Put DPRX and LTTPRs (if any) into intra-hop AUX mode by writing TPS1
1308 	 * in DP_TRAINING_PATTERN_SET.
1309 	 */
1310 	if (!intel_dp_reset_link_train(intel_dp, crtc_state, DP_PHY_DPRX,
1311 				       DP_TRAINING_PATTERN_1)) {
1312 		lt_err(intel_dp, DP_PHY_DPRX, "Failed to start 128b/132b TPS1\n");
1313 		return false;
1314 	}
1315 
1316 	delay_us = drm_dp_128b132b_read_aux_rd_interval(&intel_dp->aux);
1317 
1318 	/* Read the initial TX FFE settings. */
1319 	if (drm_dp_dpcd_read_link_status(&intel_dp->aux, link_status) < 0) {
1320 		lt_err(intel_dp, DP_PHY_DPRX, "Failed to read TX FFE presets\n");
1321 		return false;
1322 	}
1323 
1324 	/* Update signal levels and training set as requested. */
1325 	intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX, link_status);
1326 	if (!intel_dp_update_link_train(intel_dp, crtc_state, DP_PHY_DPRX)) {
1327 		lt_err(intel_dp, DP_PHY_DPRX, "Failed to set initial TX FFE settings\n");
1328 		return false;
1329 	}
1330 
1331 	/* Start transmitting 128b/132b TPS2. */
1332 	if (!intel_dp_set_link_train(intel_dp, crtc_state, DP_PHY_DPRX,
1333 				     DP_TRAINING_PATTERN_2)) {
1334 		lt_err(intel_dp, DP_PHY_DPRX, "Failed to start 128b/132b TPS2\n");
1335 		return false;
1336 	}
1337 
1338 	/* Time budget for the LANEx_EQ_DONE Sequence */
1339 	deadline = jiffies + msecs_to_jiffies_timeout(400);
1340 
1341 	for (try = 0; try < max_tries; try++) {
1342 		usleep_range(delay_us, 2 * delay_us);
1343 
1344 		/*
1345 		 * The delay may get updated. The transmitter shall read the
1346 		 * delay before link status during link training.
1347 		 */
1348 		delay_us = drm_dp_128b132b_read_aux_rd_interval(&intel_dp->aux);
1349 
1350 		if (drm_dp_dpcd_read_link_status(&intel_dp->aux, link_status) < 0) {
1351 			lt_err(intel_dp, DP_PHY_DPRX, "Failed to read link status\n");
1352 			return false;
1353 		}
1354 
1355 		if (drm_dp_128b132b_link_training_failed(link_status)) {
1356 			intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
1357 			lt_err(intel_dp, DP_PHY_DPRX,
1358 			       "Downstream link training failure\n");
1359 			return false;
1360 		}
1361 
1362 		if (drm_dp_128b132b_lane_channel_eq_done(link_status, crtc_state->lane_count)) {
1363 			lt_dbg(intel_dp, DP_PHY_DPRX, "Lane channel eq done\n");
1364 			break;
1365 		}
1366 
1367 		if (timeout) {
1368 			intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
1369 			lt_err(intel_dp, DP_PHY_DPRX, "Lane channel eq timeout\n");
1370 			return false;
1371 		}
1372 
1373 		if (time_after(jiffies, deadline))
1374 			timeout = true; /* try one last time after deadline */
1375 
1376 		/* Update signal levels and training set as requested. */
1377 		intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX, link_status);
1378 		if (!intel_dp_update_link_train(intel_dp, crtc_state, DP_PHY_DPRX)) {
1379 			lt_err(intel_dp, DP_PHY_DPRX, "Failed to update TX FFE settings\n");
1380 			return false;
1381 		}
1382 	}
1383 
1384 	if (try == max_tries) {
1385 		intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
1386 		lt_err(intel_dp, DP_PHY_DPRX, "Max loop count reached\n");
1387 		return false;
1388 	}
1389 
1390 	for (;;) {
1391 		if (time_after(jiffies, deadline))
1392 			timeout = true; /* try one last time after deadline */
1393 
1394 		if (drm_dp_dpcd_read_link_status(&intel_dp->aux, link_status) < 0) {
1395 			lt_err(intel_dp, DP_PHY_DPRX, "Failed to read link status\n");
1396 			return false;
1397 		}
1398 
1399 		if (drm_dp_128b132b_link_training_failed(link_status)) {
1400 			intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
1401 			lt_err(intel_dp, DP_PHY_DPRX, "Downstream link training failure\n");
1402 			return false;
1403 		}
1404 
1405 		if (drm_dp_128b132b_eq_interlane_align_done(link_status)) {
1406 			lt_dbg(intel_dp, DP_PHY_DPRX, "Interlane align done\n");
1407 			break;
1408 		}
1409 
1410 		if (timeout) {
1411 			intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
1412 			lt_err(intel_dp, DP_PHY_DPRX, "Interlane align timeout\n");
1413 			return false;
1414 		}
1415 
1416 		usleep_range(2000, 3000);
1417 	}
1418 
1419 	return true;
1420 }
1421 
1422 /*
1423  * 128b/132b DP LANEx_CDS_DONE Sequence (DP 2.0 E11 3.5.2.16.2)
1424  */
1425 static bool
1426 intel_dp_128b132b_lane_cds(struct intel_dp *intel_dp,
1427 			   const struct intel_crtc_state *crtc_state,
1428 			   int lttpr_count)
1429 {
1430 	u8 link_status[DP_LINK_STATUS_SIZE];
1431 	unsigned long deadline;
1432 
1433 	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
1434 			       DP_TRAINING_PATTERN_2_CDS) != 1) {
1435 		lt_err(intel_dp, DP_PHY_DPRX, "Failed to start 128b/132b TPS2 CDS\n");
1436 		return false;
1437 	}
1438 
1439 	/* Time budget for the LANEx_CDS_DONE Sequence */
1440 	deadline = jiffies + msecs_to_jiffies_timeout((lttpr_count + 1) * 20);
1441 
1442 	for (;;) {
1443 		bool timeout = false;
1444 
1445 		if (time_after(jiffies, deadline))
1446 			timeout = true; /* try one last time after deadline */
1447 
1448 		usleep_range(2000, 3000);
1449 
1450 		if (drm_dp_dpcd_read_link_status(&intel_dp->aux, link_status) < 0) {
1451 			lt_err(intel_dp, DP_PHY_DPRX, "Failed to read link status\n");
1452 			return false;
1453 		}
1454 
1455 		if (drm_dp_128b132b_eq_interlane_align_done(link_status) &&
1456 		    drm_dp_128b132b_cds_interlane_align_done(link_status) &&
1457 		    drm_dp_128b132b_lane_symbol_locked(link_status, crtc_state->lane_count)) {
1458 			lt_dbg(intel_dp, DP_PHY_DPRX, "CDS interlane align done\n");
1459 			break;
1460 		}
1461 
1462 		if (drm_dp_128b132b_link_training_failed(link_status)) {
1463 			intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
1464 			lt_err(intel_dp, DP_PHY_DPRX, "Downstream link training failure\n");
1465 			return false;
1466 		}
1467 
1468 		if (timeout) {
1469 			intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
1470 			lt_err(intel_dp, DP_PHY_DPRX, "CDS timeout\n");
1471 			return false;
1472 		}
1473 	}
1474 
1475 	return true;
1476 }
1477 
1478 /*
1479  * 128b/132b link training sequence. (DP 2.0 E11 SCR on link training.)
1480  */
1481 static bool
1482 intel_dp_128b132b_link_train(struct intel_dp *intel_dp,
1483 			     const struct intel_crtc_state *crtc_state,
1484 			     int lttpr_count)
1485 {
1486 	bool passed = false;
1487 
1488 	if (wait_for(intel_dp_128b132b_intra_hop(intel_dp, crtc_state) == 0, 500)) {
1489 		lt_err(intel_dp, DP_PHY_DPRX, "128b/132b intra-hop not clear\n");
1490 		return false;
1491 	}
1492 
1493 	if (intel_dp_128b132b_lane_eq(intel_dp, crtc_state) &&
1494 	    intel_dp_128b132b_lane_cds(intel_dp, crtc_state, lttpr_count))
1495 		passed = true;
1496 
1497 	lt_dbg(intel_dp, DP_PHY_DPRX,
1498 	       "128b/132b Link Training %s at link rate = %d, lane count = %d\n",
1499 	       passed ? "passed" : "failed",
1500 	       crtc_state->port_clock, crtc_state->lane_count);
1501 
1502 	return passed;
1503 }
1504 
1505 /**
1506  * intel_dp_start_link_train - start link training
1507  * @state: Atomic state
1508  * @intel_dp: DP struct
1509  * @crtc_state: state for CRTC attached to the encoder
1510  *
1511  * Start the link training of the @intel_dp port, scheduling a fallback
1512  * retraining with reduced link rate/lane parameters if the link training
1513  * fails.
1514  * After calling this function intel_dp_stop_link_train() must be called.
1515  *
1516  * NOTE: @state is only valid for MST links and can be %NULL for SST.
1517  */
1518 void intel_dp_start_link_train(struct intel_atomic_state *state,
1519 			       struct intel_dp *intel_dp,
1520 			       const struct intel_crtc_state *crtc_state)
1521 {
1522 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1523 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1524 	struct intel_encoder *encoder = &dig_port->base;
1525 	bool passed;
1526 	/*
1527 	 * Reinit the LTTPRs here to ensure that they are switched to
1528 	 * non-transparent mode. During an earlier LTTPR detection this
1529 	 * could've been prevented by an active link.
1530 	 */
1531 	int lttpr_count = intel_dp_init_lttpr_and_dprx_caps(intel_dp);
1532 
1533 	if (drm_WARN_ON(&i915->drm,
1534 			intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) &&
1535 			!state))
1536 		return;
1537 
1538 	if (lttpr_count < 0)
1539 		/* Still continue with enabling the port and link training. */
1540 		lttpr_count = 0;
1541 
1542 	intel_dp_prepare_link_train(intel_dp, crtc_state);
1543 
1544 	if (intel_dp_is_uhbr(crtc_state))
1545 		passed = intel_dp_128b132b_link_train(intel_dp, crtc_state, lttpr_count);
1546 	else
1547 		passed = intel_dp_link_train_all_phys(intel_dp, crtc_state, lttpr_count);
1548 
1549 	if (intel_dp->link.force_train_failure) {
1550 		intel_dp->link.force_train_failure--;
1551 		lt_dbg(intel_dp, DP_PHY_DPRX, "Forcing link training failure\n");
1552 	} else if (passed) {
1553 		intel_dp->link.seq_train_failures = 0;
1554 		intel_encoder_link_check_queue_work(encoder, 2000);
1555 		return;
1556 	}
1557 
1558 	intel_dp->link.seq_train_failures++;
1559 
1560 	/*
1561 	 * Ignore the link failure in CI
1562 	 *
1563 	 * In fixed enviroments like CI, sometimes unexpected long HPDs are
1564 	 * generated by the displays. If ignore_long_hpd flag is set, such long
1565 	 * HPDs are ignored. And probably as a consequence of these ignored
1566 	 * long HPDs, subsequent link trainings are failed resulting into CI
1567 	 * execution failures.
1568 	 *
1569 	 * For test cases which rely on the link training or processing of HPDs
1570 	 * ignore_long_hpd flag can unset from the testcase.
1571 	 */
1572 	if (i915->display.hotplug.ignore_long_hpd) {
1573 		lt_dbg(intel_dp, DP_PHY_DPRX, "Ignore the link failure\n");
1574 		return;
1575 	}
1576 
1577 	if (intel_dp->link.seq_train_failures < 2) {
1578 		intel_encoder_link_check_queue_work(encoder, 0);
1579 		return;
1580 	}
1581 
1582 	if (intel_dp_schedule_fallback_link_training(state, intel_dp, crtc_state))
1583 		return;
1584 
1585 	intel_dp->link.retrain_disabled = true;
1586 
1587 	if (!passed)
1588 		lt_err(intel_dp, DP_PHY_DPRX, "Can't reduce link training parameters after failure\n");
1589 	else
1590 		lt_dbg(intel_dp, DP_PHY_DPRX, "Can't reduce link training parameters after forced failure\n");
1591 }
1592 
/**
 * intel_dp_128b132b_sdp_crc16 - enable SDP CRC16 on a 128b/132b link
 * @intel_dp: DP struct
 * @crtc_state: state for CRTC attached to the encoder
 *
 * Enable CRC16 error detection for SDPs in the sink's DPCD on DP 2.0
 * 128b/132b (UHBR) links. No-op for non-UHBR links. The AUX write is
 * best-effort; its result is not checked.
 */
void intel_dp_128b132b_sdp_crc16(struct intel_dp *intel_dp,
				 const struct intel_crtc_state *crtc_state)
{
	/*
	 * VIDEO_DIP_CTL register bit 31 should be set to '0' to not
	 * disable SDP CRC. This is applicable for Display version 13.
	 * Default value of bit 31 is '0' hence discarding the write
	 * TODO: Corrective actions on SDP corruption yet to be defined
	 */
	if (!intel_dp_is_uhbr(crtc_state))
		return;

	/* DP v2.0 SCR on SDP CRC16 for 128b/132b Link Layer */
	drm_dp_dpcd_writeb(&intel_dp->aux,
			   DP_SDP_ERROR_DETECTION_CONFIGURATION,
			   DP_SDP_CRC16_128B132B_EN);

	lt_dbg(intel_dp, DP_PHY_DPRX, "DP2.0 SDP CRC16 for 128b/132b enabled\n");
}
1612 
1613 static struct intel_dp *intel_connector_to_intel_dp(struct intel_connector *connector)
1614 {
1615 	if (connector->mst_port)
1616 		return connector->mst_port;
1617 	else
1618 		return enc_to_intel_dp(intel_attached_encoder(connector));
1619 }
1620 
/*
 * debugfs i915_dp_force_link_rate read handler: list "auto" and all source
 * rates, marking the forced value with [] and the trained rate with *.
 */
static int i915_dp_force_link_rate_show(struct seq_file *m, void *data)
{
	struct intel_connector *connector = to_intel_connector(m->private);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector);
	int current_rate = -1;
	int force_rate;
	int err;
	int i;

	/* Sample the link state under the connection mutex. */
	err = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
	if (err)
		return err;

	if (intel_dp->link_trained)
		current_rate = intel_dp->link_rate;
	force_rate = intel_dp->link.force_rate;

	drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);

	/* force_rate == 0 means no forced rate ("auto"). */
	seq_printf(m, "%sauto%s",
		   force_rate == 0 ? "[" : "",
		   force_rate == 0 ? "]" : "");

	for (i = 0; i < intel_dp->num_source_rates; i++)
		seq_printf(m, " %s%d%s%s",
			   intel_dp->source_rates[i] == force_rate ? "[" : "",
			   intel_dp->source_rates[i],
			   intel_dp->source_rates[i] == current_rate ? "*" : "",
			   intel_dp->source_rates[i] == force_rate ? "]" : "");

	seq_putc(m, '\n');

	return 0;
}
1656 
1657 static int parse_link_rate(struct intel_dp *intel_dp, const char __user *ubuf, size_t len)
1658 {
1659 	char *kbuf;
1660 	const char *p;
1661 	int rate;
1662 	int ret = 0;
1663 
1664 	kbuf = memdup_user_nul(ubuf, len);
1665 	if (IS_ERR(kbuf))
1666 		return PTR_ERR(kbuf);
1667 
1668 	p = strim(kbuf);
1669 
1670 	if (!strcmp(p, "auto")) {
1671 		rate = 0;
1672 	} else {
1673 		ret = kstrtoint(p, 0, &rate);
1674 		if (ret < 0)
1675 			goto out_free;
1676 
1677 		if (intel_dp_rate_index(intel_dp->source_rates,
1678 					intel_dp->num_source_rates,
1679 					rate) < 0)
1680 			ret = -EINVAL;
1681 	}
1682 
1683 out_free:
1684 	kfree(kbuf);
1685 
1686 	return ret < 0 ? ret : rate;
1687 }
1688 
/*
 * debugfs i915_dp_force_link_rate write handler: parse and store a forced
 * link rate, resetting link parameters so it applies on the next training.
 */
static ssize_t i915_dp_force_link_rate_write(struct file *file,
					     const char __user *ubuf,
					     size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct intel_connector *connector = to_intel_connector(m->private);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector);
	int rate;
	int err;

	/* 0 means "auto"; negative is a parse/validation error. */
	rate = parse_link_rate(intel_dp, ubuf, len);
	if (rate < 0)
		return rate;

	err = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
	if (err)
		return err;

	intel_dp_reset_link_params(intel_dp);
	intel_dp->link.force_rate = rate;

	drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);

	*offp += len;

	return len;
}
DEFINE_SHOW_STORE_ATTRIBUTE(i915_dp_force_link_rate);
1718 
/*
 * debugfs i915_dp_force_lane_count read handler: list "auto" and lane
 * counts 1/2/4, marking the forced value with [] and the trained one with *.
 */
static int i915_dp_force_lane_count_show(struct seq_file *m, void *data)
{
	struct intel_connector *connector = to_intel_connector(m->private);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector);
	int current_lane_count = -1;
	int force_lane_count;
	int err;
	int i;

	/* Sample the link state under the connection mutex. */
	err = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
	if (err)
		return err;

	if (intel_dp->link_trained)
		current_lane_count = intel_dp->lane_count;
	force_lane_count = intel_dp->link.force_lane_count;

	drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);

	/* force_lane_count == 0 means no forced lane count ("auto"). */
	seq_printf(m, "%sauto%s",
		   force_lane_count == 0 ? "[" : "",
		   force_lane_count == 0 ? "]" : "");

	/* Iterate the valid lane counts: 1, 2, 4. */
	for (i = 1; i <= 4; i <<= 1)
		seq_printf(m, " %s%d%s%s",
			   i == force_lane_count ? "[" : "",
			   i,
			   i == current_lane_count ? "*" : "",
			   i == force_lane_count ? "]" : "");

	seq_putc(m, '\n');

	return 0;
}
1754 
1755 static int parse_lane_count(const char __user *ubuf, size_t len)
1756 {
1757 	char *kbuf;
1758 	const char *p;
1759 	int lane_count;
1760 	int ret = 0;
1761 
1762 	kbuf = memdup_user_nul(ubuf, len);
1763 	if (IS_ERR(kbuf))
1764 		return PTR_ERR(kbuf);
1765 
1766 	p = strim(kbuf);
1767 
1768 	if (!strcmp(p, "auto")) {
1769 		lane_count = 0;
1770 	} else {
1771 		ret = kstrtoint(p, 0, &lane_count);
1772 		if (ret < 0)
1773 			goto out_free;
1774 
1775 		switch (lane_count) {
1776 		case 1:
1777 		case 2:
1778 		case 4:
1779 			break;
1780 		default:
1781 			ret = -EINVAL;
1782 		}
1783 	}
1784 
1785 out_free:
1786 	kfree(kbuf);
1787 
1788 	return ret < 0 ? ret : lane_count;
1789 }
1790 
/*
 * debugfs i915_dp_force_lane_count write handler: parse and store a forced
 * lane count, resetting link parameters so it applies on the next training.
 */
static ssize_t i915_dp_force_lane_count_write(struct file *file,
					      const char __user *ubuf,
					      size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct intel_connector *connector = to_intel_connector(m->private);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector);
	int lane_count;
	int err;

	/* 0 means "auto"; negative is a parse/validation error. */
	lane_count = parse_lane_count(ubuf, len);
	if (lane_count < 0)
		return lane_count;

	err = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
	if (err)
		return err;

	intel_dp_reset_link_params(intel_dp);
	intel_dp->link.force_lane_count = lane_count;

	drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);

	*offp += len;

	return len;
}
DEFINE_SHOW_STORE_ATTRIBUTE(i915_dp_force_lane_count);
1820 
/* debugfs i915_dp_max_link_rate read handler: report the current max link rate. */
static int i915_dp_max_link_rate_show(void *data, u64 *val)
{
	struct intel_connector *connector = to_intel_connector(data);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector);
	int err;

	/* Read link.max_rate under the connection mutex. */
	err = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
	if (err)
		return err;

	*val = intel_dp->link.max_rate;

	drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(i915_dp_max_link_rate_fops, i915_dp_max_link_rate_show, NULL, "%llu\n");
1839 
/* debugfs i915_dp_max_lane_count read handler: report the current max lane count. */
static int i915_dp_max_lane_count_show(void *data, u64 *val)
{
	struct intel_connector *connector = to_intel_connector(data);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector);
	int err;

	/* Read link.max_lane_count under the connection mutex. */
	err = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
	if (err)
		return err;

	*val = intel_dp->link.max_lane_count;

	drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(i915_dp_max_lane_count_fops, i915_dp_max_lane_count_show, NULL, "%llu\n");
1858 
1859 static int i915_dp_force_link_training_failure_show(void *data, u64 *val)
1860 {
1861 	struct intel_connector *connector = to_intel_connector(data);
1862 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1863 	struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector);
1864 	int err;
1865 
1866 	err = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
1867 	if (err)
1868 		return err;
1869 
1870 	*val = intel_dp->link.force_train_failure;
1871 
1872 	drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
1873 
1874 	return 0;
1875 }
1876 
1877 static int i915_dp_force_link_training_failure_write(void *data, u64 val)
1878 {
1879 	struct intel_connector *connector = to_intel_connector(data);
1880 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1881 	struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector);
1882 	int err;
1883 
1884 	if (val > 2)
1885 		return -EINVAL;
1886 
1887 	err = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
1888 	if (err)
1889 		return err;
1890 
1891 	intel_dp->link.force_train_failure = val;
1892 
1893 	drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
1894 
1895 	return 0;
1896 }
1897 DEFINE_DEBUGFS_ATTRIBUTE(i915_dp_force_link_training_failure_fops,
1898 			 i915_dp_force_link_training_failure_show,
1899 			 i915_dp_force_link_training_failure_write, "%llu\n");
1900 
1901 static int i915_dp_force_link_retrain_show(void *data, u64 *val)
1902 {
1903 	struct intel_connector *connector = to_intel_connector(data);
1904 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1905 	struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector);
1906 	int err;
1907 
1908 	err = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
1909 	if (err)
1910 		return err;
1911 
1912 	*val = intel_dp->link.force_retrain;
1913 
1914 	drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
1915 
1916 	return 0;
1917 }
1918 
1919 static int i915_dp_force_link_retrain_write(void *data, u64 val)
1920 {
1921 	struct intel_connector *connector = to_intel_connector(data);
1922 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1923 	struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector);
1924 	int err;
1925 
1926 	err = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
1927 	if (err)
1928 		return err;
1929 
1930 	intel_dp->link.force_retrain = val;
1931 
1932 	drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
1933 
1934 	intel_hpd_trigger_irq(dp_to_dig_port(intel_dp));
1935 
1936 	return 0;
1937 }
1938 DEFINE_DEBUGFS_ATTRIBUTE(i915_dp_force_link_retrain_fops,
1939 			 i915_dp_force_link_retrain_show,
1940 			 i915_dp_force_link_retrain_write, "%llu\n");
1941 
1942 static int i915_dp_link_retrain_disabled_show(struct seq_file *m, void *data)
1943 {
1944 	struct intel_connector *connector = to_intel_connector(m->private);
1945 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1946 	struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector);
1947 	int err;
1948 
1949 	err = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
1950 	if (err)
1951 		return err;
1952 
1953 	seq_printf(m, "%s\n", str_yes_no(intel_dp->link.retrain_disabled));
1954 
1955 	drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
1956 
1957 	return 0;
1958 }
1959 DEFINE_SHOW_ATTRIBUTE(i915_dp_link_retrain_disabled);
1960 
1961 void intel_dp_link_training_debugfs_add(struct intel_connector *connector)
1962 {
1963 	struct dentry *root = connector->base.debugfs_entry;
1964 
1965 	if (connector->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort &&
1966 	    connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
1967 		return;
1968 
1969 	debugfs_create_file("i915_dp_force_link_rate", 0644, root,
1970 			    connector, &i915_dp_force_link_rate_fops);
1971 
1972 	debugfs_create_file("i915_dp_force_lane_count", 0644, root,
1973 			    connector, &i915_dp_force_lane_count_fops);
1974 
1975 	debugfs_create_file("i915_dp_max_link_rate", 0444, root,
1976 			    connector, &i915_dp_max_link_rate_fops);
1977 
1978 	debugfs_create_file("i915_dp_max_lane_count", 0444, root,
1979 			    connector, &i915_dp_max_lane_count_fops);
1980 
1981 	debugfs_create_file("i915_dp_force_link_training_failure", 0644, root,
1982 			    connector, &i915_dp_force_link_training_failure_fops);
1983 
1984 	debugfs_create_file("i915_dp_force_link_retrain", 0644, root,
1985 			    connector, &i915_dp_force_link_retrain_fops);
1986 
1987 	debugfs_create_file("i915_dp_link_retrain_disabled", 0444, root,
1988 			    connector, &i915_dp_link_retrain_disabled_fops);
1989 }
1990